
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: ClientDatanodeProtocol.proto
package org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto;
public final class ClientDatanodeProtocolProtos {
private ClientDatanodeProtocolProtos() {}
public static void registerAllExtensions(
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistry registry) {
}
public interface GetReplicaVisibleLengthRequestProtoOrBuilder
extends org.apache.hadoop.shaded.com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.ExtendedBlockProto block = 1;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
boolean hasBlock();
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock();
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.GetReplicaVisibleLengthRequestProto}
*
* <pre>
**
* block - block for which visible length is requested
* </pre>
*/
public static final class GetReplicaVisibleLengthRequestProto extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage
implements GetReplicaVisibleLengthRequestProtoOrBuilder {
// Use GetReplicaVisibleLengthRequestProto.newBuilder() to construct.
private GetReplicaVisibleLengthRequestProto(org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private GetReplicaVisibleLengthRequestProto(boolean noInit) { this.unknownFields = org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final GetReplicaVisibleLengthRequestProto defaultInstance;
public static GetReplicaVisibleLengthRequestProto getDefaultInstance() {
return defaultInstance;
}
public GetReplicaVisibleLengthRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetReplicaVisibleLengthRequestProto(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = block_.toBuilder();
}
block_ = input.readMessage(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(block_);
block_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
}
}
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.Builder.class);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Parser<GetReplicaVisibleLengthRequestProto> PARSER =
new org.apache.hadoop.shaded.com.google.protobuf.AbstractParser<GetReplicaVisibleLengthRequestProto>() {
public GetReplicaVisibleLengthRequestProto parsePartialFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return new GetReplicaVisibleLengthRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public org.apache.hadoop.shaded.com.google.protobuf.Parser<GetReplicaVisibleLengthRequestProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.ExtendedBlockProto block = 1;
public static final int BLOCK_FIELD_NUMBER = 1;
private org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
return block_;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
return block_;
}
private void initFields() {
block_ = org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasBlock()) {
memoizedIsInitialized = 0;
return false;
}
if (!getBlock().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, block_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream
.computeMessageSize(1, block_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto) obj;
boolean result = true;
result = result && (hasBlock() == other.hasBlock());
if (hasBlock()) {
result = result && getBlock()
.equals(other.getBlock());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasBlock()) {
hash = (37 * hash) + BLOCK_FIELD_NUMBER;
hash = (53 * hash) + getBlock().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.GetReplicaVisibleLengthRequestProto}
*
* <pre>
**
* block - block for which visible length is requested
* </pre>
*/
public static final class Builder extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProtoOrBuilder {
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getBlockFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (blockBuilder_ == null) {
block_ = org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_descriptor;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.getDefaultInstance();
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (blockBuilder_ == null) {
result.block_ = block_;
} else {
result.block_ = blockBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(org.apache.hadoop.shaded.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.getDefaultInstance()) return this;
if (other.hasBlock()) {
mergeBlock(other.getBlock());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasBlock()) {
return false;
}
if (!getBlock().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.ExtendedBlockProto block = 1;
private org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_ = org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
private org.apache.hadoop.shaded.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
if (blockBuilder_ == null) {
return block_;
} else {
return blockBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder setBlock(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
if (blockBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
block_ = value;
onChanged();
} else {
blockBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder setBlock(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) {
if (blockBuilder_ == null) {
block_ = builderForValue.build();
onChanged();
} else {
blockBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder mergeBlock(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
if (blockBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
block_ != org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) {
block_ =
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
} else {
block_ = value;
}
onChanged();
} else {
blockBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder clearBlock() {
if (blockBuilder_ == null) {
block_ = org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
onChanged();
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getBlockFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
if (blockBuilder_ != null) {
return blockBuilder_.getMessageOrBuilder();
} else {
return block_;
}
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
private org.apache.hadoop.shaded.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>
getBlockFieldBuilder() {
if (blockBuilder_ == null) {
blockBuilder_ = new org.apache.hadoop.shaded.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>(
block_,
getParentForChildren(),
isClean());
block_ = null;
}
return blockBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetReplicaVisibleLengthRequestProto)
}
static {
defaultInstance = new GetReplicaVisibleLengthRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetReplicaVisibleLengthRequestProto)
}
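// A minimal usage sketch, assuming an already-populated HdfsProtos.ExtendedBlockProto
// held in a hypothetical local variable "block": build the request (build() throws if
// the required block field is unset), serialize it, and parse it back.
//
//   GetReplicaVisibleLengthRequestProto request =
//       GetReplicaVisibleLengthRequestProto.newBuilder()
//           .setBlock(block)
//           .build();
//   byte[] wire = request.toByteArray();
//   GetReplicaVisibleLengthRequestProto parsed =
//       GetReplicaVisibleLengthRequestProto.parseFrom(wire);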
public interface GetReplicaVisibleLengthResponseProtoOrBuilder
extends org.apache.hadoop.shaded.com.google.protobuf.MessageOrBuilder {
// required uint64 length = 1;
/**
* required uint64 length = 1;
*/
boolean hasLength();
/**
* required uint64 length = 1;
*/
long getLength();
}
/**
* Protobuf type {@code hadoop.hdfs.GetReplicaVisibleLengthResponseProto}
*
* <pre>
**
* length - visible length of the block
* </pre>
*/
public static final class GetReplicaVisibleLengthResponseProto extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage
implements GetReplicaVisibleLengthResponseProtoOrBuilder {
// Use GetReplicaVisibleLengthResponseProto.newBuilder() to construct.
private GetReplicaVisibleLengthResponseProto(org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private GetReplicaVisibleLengthResponseProto(boolean noInit) { this.unknownFields = org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final GetReplicaVisibleLengthResponseProto defaultInstance;
public static GetReplicaVisibleLengthResponseProto getDefaultInstance() {
return defaultInstance;
}
public GetReplicaVisibleLengthResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetReplicaVisibleLengthResponseProto(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
length_ = input.readUInt64();
break;
}
}
}
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.Builder.class);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Parser<GetReplicaVisibleLengthResponseProto> PARSER =
new org.apache.hadoop.shaded.com.google.protobuf.AbstractParser<GetReplicaVisibleLengthResponseProto>() {
public GetReplicaVisibleLengthResponseProto parsePartialFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return new GetReplicaVisibleLengthResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public org.apache.hadoop.shaded.com.google.protobuf.Parser<GetReplicaVisibleLengthResponseProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required uint64 length = 1;
public static final int LENGTH_FIELD_NUMBER = 1;
private long length_;
/**
* required uint64 length = 1;
*/
public boolean hasLength() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint64 length = 1;
*/
public long getLength() {
return length_;
}
private void initFields() {
length_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasLength()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt64(1, length_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream
.computeUInt64Size(1, length_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto) obj;
boolean result = true;
result = result && (hasLength() == other.hasLength());
if (hasLength()) {
result = result && (getLength()
== other.getLength());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasLength()) {
hash = (37 * hash) + LENGTH_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getLength());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.GetReplicaVisibleLengthResponseProto}
*
* <pre>
**
* length - visible length of the block
* </pre>
*/
public static final class Builder extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProtoOrBuilder {
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
length_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_descriptor;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance();
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.length_ = length_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(org.apache.hadoop.shaded.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance()) return this;
if (other.hasLength()) {
setLength(other.getLength());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasLength()) {
return false;
}
return true;
}
public Builder mergeFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required uint64 length = 1;
private long length_ ;
/**
* required uint64 length = 1;
*/
public boolean hasLength() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint64 length = 1;
*/
public long getLength() {
return length_;
}
/**
* required uint64 length = 1;
*/
public Builder setLength(long value) {
bitField0_ |= 0x00000001;
length_ = value;
onChanged();
return this;
}
/**
* required uint64 length = 1;
*/
public Builder clearLength() {
bitField0_ = (bitField0_ & ~0x00000001);
length_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetReplicaVisibleLengthResponseProto)
}
static {
defaultInstance = new GetReplicaVisibleLengthResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetReplicaVisibleLengthResponseProto)
}
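// A minimal usage sketch for the response side; the 42L value is only an example.
// The required length is set on a builder and read back after checking presence.
//
//   GetReplicaVisibleLengthResponseProto response =
//       GetReplicaVisibleLengthResponseProto.newBuilder()
//           .setLength(42L)
//           .build();
//   long visibleLength = response.hasLength() ? response.getLength() : 0L;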
public interface RefreshNamenodesRequestProtoOrBuilder
extends org.apache.hadoop.shaded.com.google.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.hdfs.RefreshNamenodesRequestProto}
*
* <pre>
**
* void request
* </pre>
*/
public static final class RefreshNamenodesRequestProto extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage
implements RefreshNamenodesRequestProtoOrBuilder {
// Use RefreshNamenodesRequestProto.newBuilder() to construct.
private RefreshNamenodesRequestProto(org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private RefreshNamenodesRequestProto(boolean noInit) { this.unknownFields = org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final RefreshNamenodesRequestProto defaultInstance;
public static RefreshNamenodesRequestProto getDefaultInstance() {
return defaultInstance;
}
public RefreshNamenodesRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private RefreshNamenodesRequestProto(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
initFields();
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.Builder.class);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Parser<RefreshNamenodesRequestProto> PARSER =
new org.apache.hadoop.shaded.com.google.protobuf.AbstractParser<RefreshNamenodesRequestProto>() {
public RefreshNamenodesRequestProto parsePartialFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return new RefreshNamenodesRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public org.apache.hadoop.shaded.com.google.protobuf.Parser<RefreshNamenodesRequestProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.RefreshNamenodesRequestProto}
*
* <pre>
**
* void request
* </pre>
*/
public static final class Builder extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProtoOrBuilder {
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_descriptor;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.getDefaultInstance();
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(org.apache.hadoop.shaded.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.RefreshNamenodesRequestProto)
}
static {
defaultInstance = new RefreshNamenodesRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.RefreshNamenodesRequestProto)
}
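// A minimal usage sketch: this request message carries no fields, so a caller can
// simply reuse the shared default instance instead of building a new object.
//
//   RefreshNamenodesRequestProto request =
//       RefreshNamenodesRequestProto.getDefaultInstance();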
public interface RefreshNamenodesResponseProtoOrBuilder
extends org.apache.hadoop.shaded.com.google.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.hdfs.RefreshNamenodesResponseProto}
*
* <pre>
**
* void response
* </pre>
*/
public static final class RefreshNamenodesResponseProto extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage
implements RefreshNamenodesResponseProtoOrBuilder {
// Use RefreshNamenodesResponseProto.newBuilder() to construct.
private RefreshNamenodesResponseProto(org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private RefreshNamenodesResponseProto(boolean noInit) { this.unknownFields = org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final RefreshNamenodesResponseProto defaultInstance;
public static RefreshNamenodesResponseProto getDefaultInstance() {
return defaultInstance;
}
public RefreshNamenodesResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private RefreshNamenodesResponseProto(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
initFields();
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.Builder.class);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Parser<RefreshNamenodesResponseProto> PARSER =
new org.apache.hadoop.shaded.com.google.protobuf.AbstractParser<RefreshNamenodesResponseProto>() {
public RefreshNamenodesResponseProto parsePartialFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return new RefreshNamenodesResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public org.apache.hadoop.shaded.com.google.protobuf.Parser<RefreshNamenodesResponseProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.RefreshNamenodesResponseProto}
*
*
**
* void response
*
*/
public static final class Builder extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProtoOrBuilder {
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_descriptor;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance();
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(org.apache.hadoop.shaded.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.RefreshNamenodesResponseProto)
}
static {
defaultInstance = new RefreshNamenodesResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.RefreshNamenodesResponseProto)
}
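// Editorial usage sketch, not emitted by protoc: RefreshNamenodesRequestProto and
// RefreshNamenodesResponseProto are deliberately empty messages, so the whole meaning of the
// exchange is the RPC itself, which asks a datanode to reload its configured namenode list
// (typically reached through `hdfs dfsadmin -refreshNamenodes <datanode-host:ipc-port>`).
private static RefreshNamenodesRequestProto exampleRefreshNamenodesRequest() {
return RefreshNamenodesRequestProto.newBuilder().build(); // no fields to set on an empty message
}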
public interface DeleteBlockPoolRequestProtoOrBuilder
extends org.apache.hadoop.shaded.com.google.protobuf.MessageOrBuilder {
// required string blockPool = 1;
/**
* required string blockPool = 1;
*/
boolean hasBlockPool();
/**
* required string blockPool = 1;
*/
java.lang.String getBlockPool();
/**
* required string blockPool = 1;
*/
org.apache.hadoop.shaded.com.google.protobuf.ByteString
getBlockPoolBytes();
// required bool force = 2;
/**
* required bool force = 2;
*/
boolean hasForce();
/**
* required bool force = 2;
*/
boolean getForce();
}
/**
* Protobuf type {@code hadoop.hdfs.DeleteBlockPoolRequestProto}
*
*
**
* blockPool - block pool to be deleted
* force - if false, delete the block pool only if it is empty.
* if true, delete the block pool even if it has blocks.
*
*/
public static final class DeleteBlockPoolRequestProto extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage
implements DeleteBlockPoolRequestProtoOrBuilder {
// Use DeleteBlockPoolRequestProto.newBuilder() to construct.
private DeleteBlockPoolRequestProto(org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private DeleteBlockPoolRequestProto(boolean noInit) { this.unknownFields = org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final DeleteBlockPoolRequestProto defaultInstance;
public static DeleteBlockPoolRequestProto getDefaultInstance() {
return defaultInstance;
}
public DeleteBlockPoolRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DeleteBlockPoolRequestProto(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
blockPool_ = input.readBytes();
break;
}
case 16: {
bitField0_ |= 0x00000002;
force_ = input.readBool();
break;
}
}
}
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.Builder.class);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Parser<DeleteBlockPoolRequestProto> PARSER =
new org.apache.hadoop.shaded.com.google.protobuf.AbstractParser<DeleteBlockPoolRequestProto>() {
public DeleteBlockPoolRequestProto parsePartialFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return new DeleteBlockPoolRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public org.apache.hadoop.shaded.com.google.protobuf.Parser<DeleteBlockPoolRequestProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required string blockPool = 1;
public static final int BLOCKPOOL_FIELD_NUMBER = 1;
private java.lang.Object blockPool_;
/**
* required string blockPool = 1;
*/
public boolean hasBlockPool() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string blockPool = 1;
*/
public java.lang.String getBlockPool() {
java.lang.Object ref = blockPool_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.shaded.com.google.protobuf.ByteString bs =
(org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
blockPool_ = s;
}
return s;
}
}
/**
* required string blockPool = 1;
*/
public org.apache.hadoop.shaded.com.google.protobuf.ByteString
getBlockPoolBytes() {
java.lang.Object ref = blockPool_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.shaded.com.google.protobuf.ByteString b =
org.apache.hadoop.shaded.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPool_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
}
}
// required bool force = 2;
public static final int FORCE_FIELD_NUMBER = 2;
private boolean force_;
/**
* required bool force = 2;
*/
public boolean hasForce() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required bool force = 2;
*/
public boolean getForce() {
return force_;
}
private void initFields() {
blockPool_ = "";
force_ = false;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasBlockPool()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasForce()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getBlockPoolBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBool(2, force_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getBlockPoolBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream
.computeBoolSize(2, force_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto) obj;
boolean result = true;
result = result && (hasBlockPool() == other.hasBlockPool());
if (hasBlockPool()) {
result = result && getBlockPool()
.equals(other.getBlockPool());
}
result = result && (hasForce() == other.hasForce());
if (hasForce()) {
result = result && (getForce()
== other.getForce());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasBlockPool()) {
hash = (37 * hash) + BLOCKPOOL_FIELD_NUMBER;
hash = (53 * hash) + getBlockPool().hashCode();
}
if (hasForce()) {
hash = (37 * hash) + FORCE_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getForce());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.DeleteBlockPoolRequestProto}
*
*
**
* blockPool - block pool to be deleted
* force - if false, delete the block pool only if it is empty.
* if true, delete the block pool even if it has blocks.
*
*/
public static final class Builder extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProtoOrBuilder {
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
blockPool_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
force_ = false;
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_descriptor;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.getDefaultInstance();
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.blockPool_ = blockPool_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.force_ = force_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(org.apache.hadoop.shaded.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.getDefaultInstance()) return this;
if (other.hasBlockPool()) {
bitField0_ |= 0x00000001;
blockPool_ = other.blockPool_;
onChanged();
}
if (other.hasForce()) {
setForce(other.getForce());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasBlockPool()) {
return false;
}
if (!hasForce()) {
return false;
}
return true;
}
public Builder mergeFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string blockPool = 1;
private java.lang.Object blockPool_ = "";
/**
* required string blockPool = 1;
*/
public boolean hasBlockPool() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string blockPool = 1;
*/
public java.lang.String getBlockPool() {
java.lang.Object ref = blockPool_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref)
.toStringUtf8();
blockPool_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string blockPool = 1;
*/
public org.apache.hadoop.shaded.com.google.protobuf.ByteString
getBlockPoolBytes() {
java.lang.Object ref = blockPool_;
if (ref instanceof String) {
org.apache.hadoop.shaded.com.google.protobuf.ByteString b =
org.apache.hadoop.shaded.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPool_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
}
}
/**
* required string blockPool = 1;
*/
public Builder setBlockPool(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
blockPool_ = value;
onChanged();
return this;
}
/**
* required string blockPool = 1;
*/
public Builder clearBlockPool() {
bitField0_ = (bitField0_ & ~0x00000001);
blockPool_ = getDefaultInstance().getBlockPool();
onChanged();
return this;
}
/**
* required string blockPool = 1;
*/
public Builder setBlockPoolBytes(
org.apache.hadoop.shaded.com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
blockPool_ = value;
onChanged();
return this;
}
// required bool force = 2;
private boolean force_ ;
/**
* required bool force = 2;
*/
public boolean hasForce() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required bool force = 2;
*/
public boolean getForce() {
return force_;
}
/**
* required bool force = 2;
*/
public Builder setForce(boolean value) {
bitField0_ |= 0x00000002;
force_ = value;
onChanged();
return this;
}
/**
* required bool force = 2;
*/
public Builder clearForce() {
bitField0_ = (bitField0_ & ~0x00000002);
force_ = false;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DeleteBlockPoolRequestProto)
}
static {
defaultInstance = new DeleteBlockPoolRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DeleteBlockPoolRequestProto)
}
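// Editorial usage sketch, not emitted by protoc: both fields of DeleteBlockPoolRequestProto are
// declared `required`, so build() throws an UninitializedMessageException unless blockPool and
// force are both set (see the generated isInitialized() checks above). The pool id below is a
// hypothetical placeholder, not a value taken from a real cluster.
private static DeleteBlockPoolRequestProto exampleDeleteBlockPoolRequest() {
return DeleteBlockPoolRequestProto.newBuilder()
.setBlockPool("BP-example-pool") // hypothetical block pool id
.setForce(false) // false: delete the pool only if it holds no blocks
.build();
}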
public interface DeleteBlockPoolResponseProtoOrBuilder
extends org.apache.hadoop.shaded.com.google.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.hdfs.DeleteBlockPoolResponseProto}
*
*
**
* void response
*
*/
public static final class DeleteBlockPoolResponseProto extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage
implements DeleteBlockPoolResponseProtoOrBuilder {
// Use DeleteBlockPoolResponseProto.newBuilder() to construct.
private DeleteBlockPoolResponseProto(org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private DeleteBlockPoolResponseProto(boolean noInit) { this.unknownFields = org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final DeleteBlockPoolResponseProto defaultInstance;
public static DeleteBlockPoolResponseProto getDefaultInstance() {
return defaultInstance;
}
public DeleteBlockPoolResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DeleteBlockPoolResponseProto(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
initFields();
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.Builder.class);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Parser<DeleteBlockPoolResponseProto> PARSER =
new org.apache.hadoop.shaded.com.google.protobuf.AbstractParser<DeleteBlockPoolResponseProto>() {
public DeleteBlockPoolResponseProto parsePartialFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return new DeleteBlockPoolResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public org.apache.hadoop.shaded.com.google.protobuf.Parser<DeleteBlockPoolResponseProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.DeleteBlockPoolResponseProto}
*
*
**
* void response
*
*/
public static final class Builder extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProtoOrBuilder {
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_descriptor;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance();
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(org.apache.hadoop.shaded.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DeleteBlockPoolResponseProto)
}
static {
defaultInstance = new DeleteBlockPoolResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DeleteBlockPoolResponseProto)
}
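// Editorial round-trip sketch, not emitted by protoc: the generated parseFrom overloads treat
// the whole input as one message, while parseDelimitedFrom expects the varint length prefix
// written by writeDelimitedTo, which makes the delimited pair suitable for streams carrying
// several messages back to back.
private static DeleteBlockPoolResponseProto exampleDelimitedRoundTrip() throws java.io.IOException {
java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
DeleteBlockPoolResponseProto.getDefaultInstance().writeDelimitedTo(out); // length-prefixed write
return DeleteBlockPoolResponseProto.parseDelimitedFrom(
new java.io.ByteArrayInputStream(out.toByteArray())); // reads exactly one prefixed message
}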
public interface GetBlockLocalPathInfoRequestProtoOrBuilder
extends org.apache.hadoop.shaded.com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.ExtendedBlockProto block = 1;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
boolean hasBlock();
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock();
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder();
// required .hadoop.common.TokenProto token = 2;
/**
* required .hadoop.common.TokenProto token = 2;
*/
boolean hasToken();
/**
* required .hadoop.common.TokenProto token = 2;
*/
org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto getToken();
/**
* required .hadoop.common.TokenProto token = 2;
*/
org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getTokenOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.GetBlockLocalPathInfoRequestProto}
*
*
**
* Gets the file information where block and its metadata is stored
* block - block for which path information is being requested
* token - block token
*
* This message is deprecated in favor of file descriptor passing.
*
*/
public static final class GetBlockLocalPathInfoRequestProto extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage
implements GetBlockLocalPathInfoRequestProtoOrBuilder {
// Use GetBlockLocalPathInfoRequestProto.newBuilder() to construct.
private GetBlockLocalPathInfoRequestProto(org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private GetBlockLocalPathInfoRequestProto(boolean noInit) { this.unknownFields = org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final GetBlockLocalPathInfoRequestProto defaultInstance;
public static GetBlockLocalPathInfoRequestProto getDefaultInstance() {
return defaultInstance;
}
public GetBlockLocalPathInfoRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetBlockLocalPathInfoRequestProto(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = block_.toBuilder();
}
block_ = input.readMessage(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(block_);
block_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
subBuilder = token_.toBuilder();
}
token_ = input.readMessage(org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(token_);
token_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
}
}
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.Builder.class);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Parser<GetBlockLocalPathInfoRequestProto> PARSER =
new org.apache.hadoop.shaded.com.google.protobuf.AbstractParser<GetBlockLocalPathInfoRequestProto>() {
public GetBlockLocalPathInfoRequestProto parsePartialFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return new GetBlockLocalPathInfoRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public org.apache.hadoop.shaded.com.google.protobuf.Parser<GetBlockLocalPathInfoRequestProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.ExtendedBlockProto block = 1;
public static final int BLOCK_FIELD_NUMBER = 1;
private org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
return block_;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
return block_;
}
// required .hadoop.common.TokenProto token = 2;
public static final int TOKEN_FIELD_NUMBER = 2;
private org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto token_;
/**
* required .hadoop.common.TokenProto token = 2;
*/
public boolean hasToken() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto getToken() {
return token_;
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getTokenOrBuilder() {
return token_;
}
private void initFields() {
block_ = org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
token_ = org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasBlock()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasToken()) {
memoizedIsInitialized = 0;
return false;
}
if (!getBlock().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
if (!getToken().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, block_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeMessage(2, token_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream
.computeMessageSize(1, block_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream
.computeMessageSize(2, token_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto) obj;
boolean result = true;
result = result && (hasBlock() == other.hasBlock());
if (hasBlock()) {
result = result && getBlock()
.equals(other.getBlock());
}
result = result && (hasToken() == other.hasToken());
if (hasToken()) {
result = result && getToken()
.equals(other.getToken());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasBlock()) {
hash = (37 * hash) + BLOCK_FIELD_NUMBER;
hash = (53 * hash) + getBlock().hashCode();
}
if (hasToken()) {
hash = (37 * hash) + TOKEN_FIELD_NUMBER;
hash = (53 * hash) + getToken().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.GetBlockLocalPathInfoRequestProto}
*
*
**
* Gets the file information where block and its metadata is stored
* block - block for which path information is being requested
* token - block token
*
* This message is deprecated in favor of file descriptor passing.
*
*/
public static final class Builder extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProtoOrBuilder {
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getBlockFieldBuilder();
getTokenFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (blockBuilder_ == null) {
block_ = org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
if (tokenBuilder_ == null) {
token_ = org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance();
} else {
tokenBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_descriptor;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.getDefaultInstance();
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (blockBuilder_ == null) {
result.block_ = block_;
} else {
result.block_ = blockBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
if (tokenBuilder_ == null) {
result.token_ = token_;
} else {
result.token_ = tokenBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(org.apache.hadoop.shaded.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.getDefaultInstance()) return this;
if (other.hasBlock()) {
mergeBlock(other.getBlock());
}
if (other.hasToken()) {
mergeToken(other.getToken());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasBlock()) {
return false;
}
if (!hasToken()) {
return false;
}
if (!getBlock().isInitialized()) {
return false;
}
if (!getToken().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.ExtendedBlockProto block = 1;
private org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_ = org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
private org.apache.hadoop.shaded.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
if (blockBuilder_ == null) {
return block_;
} else {
return blockBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder setBlock(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
if (blockBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
block_ = value;
onChanged();
} else {
blockBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder setBlock(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) {
if (blockBuilder_ == null) {
block_ = builderForValue.build();
onChanged();
} else {
blockBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder mergeBlock(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
if (blockBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
block_ != org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) {
block_ =
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
} else {
block_ = value;
}
onChanged();
} else {
blockBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder clearBlock() {
if (blockBuilder_ == null) {
block_ = org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
onChanged();
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getBlockFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
if (blockBuilder_ != null) {
return blockBuilder_.getMessageOrBuilder();
} else {
return block_;
}
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
private org.apache.hadoop.shaded.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>
getBlockFieldBuilder() {
if (blockBuilder_ == null) {
blockBuilder_ = new org.apache.hadoop.shaded.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>(
block_,
getParentForChildren(),
isClean());
block_ = null;
}
return blockBuilder_;
}
// required .hadoop.common.TokenProto token = 2;
private org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto token_ = org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance();
private org.apache.hadoop.shaded.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> tokenBuilder_;
/**
* required .hadoop.common.TokenProto token = 2;
*/
public boolean hasToken() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto getToken() {
if (tokenBuilder_ == null) {
return token_;
} else {
return tokenBuilder_.getMessage();
}
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
public Builder setToken(org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
if (tokenBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
token_ = value;
onChanged();
} else {
tokenBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
public Builder setToken(
org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) {
if (tokenBuilder_ == null) {
token_ = builderForValue.build();
onChanged();
} else {
tokenBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
public Builder mergeToken(org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
if (tokenBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002) &&
token_ != org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance()) {
token_ =
org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto.newBuilder(token_).mergeFrom(value).buildPartial();
} else {
token_ = value;
}
onChanged();
} else {
tokenBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
public Builder clearToken() {
if (tokenBuilder_ == null) {
token_ = org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance();
onChanged();
} else {
tokenBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder getTokenBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getTokenFieldBuilder().getBuilder();
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getTokenOrBuilder() {
if (tokenBuilder_ != null) {
return tokenBuilder_.getMessageOrBuilder();
} else {
return token_;
}
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
private org.apache.hadoop.shaded.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>
getTokenFieldBuilder() {
if (tokenBuilder_ == null) {
tokenBuilder_ = new org.apache.hadoop.shaded.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>(
token_,
getParentForChildren(),
isClean());
token_ = null;
}
return tokenBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetBlockLocalPathInfoRequestProto)
}
static {
defaultInstance = new GetBlockLocalPathInfoRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetBlockLocalPathInfoRequestProto)
}
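// Usage sketch (not part of the generated source): building the request message
// defined above. The variables "blockProto" and "tokenProto" are hypothetical
// placeholders for an HdfsProtos.ExtendedBlockProto and a SecurityProtos.TokenProto
// obtained elsewhere; both fields are required, so build() would throw an
// UninitializedMessageException if either setter were skipped.
//
//   GetBlockLocalPathInfoRequestProto request =
//       GetBlockLocalPathInfoRequestProto.newBuilder()
//           .setBlock(blockProto)
//           .setToken(tokenProto)
//           .build();
//
//   byte[] wire = request.toByteArray();                    // serialize
//   GetBlockLocalPathInfoRequestProto copy =
//       GetBlockLocalPathInfoRequestProto.parseFrom(wire);  // parse back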
public interface GetBlockLocalPathInfoResponseProtoOrBuilder
extends org.apache.hadoop.shaded.com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.ExtendedBlockProto block = 1;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
boolean hasBlock();
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock();
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder();
// required string localPath = 2;
/**
* required string localPath = 2;
*/
boolean hasLocalPath();
/**
* required string localPath = 2;
*/
java.lang.String getLocalPath();
/**
* required string localPath = 2;
*/
org.apache.hadoop.shaded.com.google.protobuf.ByteString
getLocalPathBytes();
// required string localMetaPath = 3;
/**
* required string localMetaPath = 3;
*/
boolean hasLocalMetaPath();
/**
* required string localMetaPath = 3;
*/
java.lang.String getLocalMetaPath();
/**
* required string localMetaPath = 3;
*/
org.apache.hadoop.shaded.com.google.protobuf.ByteString
getLocalMetaPathBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.GetBlockLocalPathInfoResponseProto}
*
*
**
* block - block for which file path information is being returned
* localPath - file path where the block data is stored
* localMetaPath - file path where the block meta data is stored
*
* This message is deprecated in favor of file descriptor passing.
*
*/
public static final class GetBlockLocalPathInfoResponseProto extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage
implements GetBlockLocalPathInfoResponseProtoOrBuilder {
// Use GetBlockLocalPathInfoResponseProto.newBuilder() to construct.
private GetBlockLocalPathInfoResponseProto(org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private GetBlockLocalPathInfoResponseProto(boolean noInit) { this.unknownFields = org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final GetBlockLocalPathInfoResponseProto defaultInstance;
public static GetBlockLocalPathInfoResponseProto getDefaultInstance() {
return defaultInstance;
}
public GetBlockLocalPathInfoResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetBlockLocalPathInfoResponseProto(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = block_.toBuilder();
}
block_ = input.readMessage(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(block_);
block_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
bitField0_ |= 0x00000002;
localPath_ = input.readBytes();
break;
}
case 26: {
bitField0_ |= 0x00000004;
localMetaPath_ = input.readBytes();
break;
}
}
}
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.Builder.class);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Parser<GetBlockLocalPathInfoResponseProto> PARSER =
new org.apache.hadoop.shaded.com.google.protobuf.AbstractParser<GetBlockLocalPathInfoResponseProto>() {
public GetBlockLocalPathInfoResponseProto parsePartialFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return new GetBlockLocalPathInfoResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public org.apache.hadoop.shaded.com.google.protobuf.Parser<GetBlockLocalPathInfoResponseProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.ExtendedBlockProto block = 1;
public static final int BLOCK_FIELD_NUMBER = 1;
private org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
return block_;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
return block_;
}
// required string localPath = 2;
public static final int LOCALPATH_FIELD_NUMBER = 2;
private java.lang.Object localPath_;
/**
* required string localPath = 2;
*/
public boolean hasLocalPath() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string localPath = 2;
*/
public java.lang.String getLocalPath() {
java.lang.Object ref = localPath_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.shaded.com.google.protobuf.ByteString bs =
(org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
localPath_ = s;
}
return s;
}
}
/**
* required string localPath = 2;
*/
public org.apache.hadoop.shaded.com.google.protobuf.ByteString
getLocalPathBytes() {
java.lang.Object ref = localPath_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.shaded.com.google.protobuf.ByteString b =
org.apache.hadoop.shaded.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
localPath_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
}
}
// required string localMetaPath = 3;
public static final int LOCALMETAPATH_FIELD_NUMBER = 3;
private java.lang.Object localMetaPath_;
/**
* required string localMetaPath = 3;
*/
public boolean hasLocalMetaPath() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required string localMetaPath = 3;
*/
public java.lang.String getLocalMetaPath() {
java.lang.Object ref = localMetaPath_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.shaded.com.google.protobuf.ByteString bs =
(org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
localMetaPath_ = s;
}
return s;
}
}
/**
* required string localMetaPath = 3;
*/
public org.apache.hadoop.shaded.com.google.protobuf.ByteString
getLocalMetaPathBytes() {
java.lang.Object ref = localMetaPath_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.shaded.com.google.protobuf.ByteString b =
org.apache.hadoop.shaded.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
localMetaPath_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
block_ = org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
localPath_ = "";
localMetaPath_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasBlock()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasLocalPath()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasLocalMetaPath()) {
memoizedIsInitialized = 0;
return false;
}
if (!getBlock().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, block_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getLocalPathBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, getLocalMetaPathBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream
.computeMessageSize(1, block_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getLocalPathBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream
.computeBytesSize(3, getLocalMetaPathBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto) obj;
boolean result = true;
result = result && (hasBlock() == other.hasBlock());
if (hasBlock()) {
result = result && getBlock()
.equals(other.getBlock());
}
result = result && (hasLocalPath() == other.hasLocalPath());
if (hasLocalPath()) {
result = result && getLocalPath()
.equals(other.getLocalPath());
}
result = result && (hasLocalMetaPath() == other.hasLocalMetaPath());
if (hasLocalMetaPath()) {
result = result && getLocalMetaPath()
.equals(other.getLocalMetaPath());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasBlock()) {
hash = (37 * hash) + BLOCK_FIELD_NUMBER;
hash = (53 * hash) + getBlock().hashCode();
}
if (hasLocalPath()) {
hash = (37 * hash) + LOCALPATH_FIELD_NUMBER;
hash = (53 * hash) + getLocalPath().hashCode();
}
if (hasLocalMetaPath()) {
hash = (37 * hash) + LOCALMETAPATH_FIELD_NUMBER;
hash = (53 * hash) + getLocalMetaPath().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.GetBlockLocalPathInfoResponseProto}
*
*
**
* block - block for which file path information is being returned
* localPath - file path where the block data is stored
* localMetaPath - file path where the block meta data is stored
*
* This message is deprecated in favor of file descriptor passing.
*
*/
public static final class Builder extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProtoOrBuilder {
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getBlockFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (blockBuilder_ == null) {
block_ = org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
localPath_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
localMetaPath_ = "";
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_descriptor;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance();
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (blockBuilder_ == null) {
result.block_ = block_;
} else {
result.block_ = blockBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.localPath_ = localPath_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.localMetaPath_ = localMetaPath_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(org.apache.hadoop.shaded.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance()) return this;
if (other.hasBlock()) {
mergeBlock(other.getBlock());
}
if (other.hasLocalPath()) {
bitField0_ |= 0x00000002;
localPath_ = other.localPath_;
onChanged();
}
if (other.hasLocalMetaPath()) {
bitField0_ |= 0x00000004;
localMetaPath_ = other.localMetaPath_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasBlock()) {
return false;
}
if (!hasLocalPath()) {
return false;
}
if (!hasLocalMetaPath()) {
return false;
}
if (!getBlock().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.ExtendedBlockProto block = 1;
private org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_ = org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
private org.apache.hadoop.shaded.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
if (blockBuilder_ == null) {
return block_;
} else {
return blockBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder setBlock(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
if (blockBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
block_ = value;
onChanged();
} else {
blockBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder setBlock(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) {
if (blockBuilder_ == null) {
block_ = builderForValue.build();
onChanged();
} else {
blockBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder mergeBlock(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
if (blockBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
block_ != org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) {
block_ =
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
} else {
block_ = value;
}
onChanged();
} else {
blockBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder clearBlock() {
if (blockBuilder_ == null) {
block_ = org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
onChanged();
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getBlockFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
if (blockBuilder_ != null) {
return blockBuilder_.getMessageOrBuilder();
} else {
return block_;
}
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
private org.apache.hadoop.shaded.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>
getBlockFieldBuilder() {
if (blockBuilder_ == null) {
blockBuilder_ = new org.apache.hadoop.shaded.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>(
block_,
getParentForChildren(),
isClean());
block_ = null;
}
return blockBuilder_;
}
// required string localPath = 2;
private java.lang.Object localPath_ = "";
/**
* required string localPath = 2;
*/
public boolean hasLocalPath() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string localPath = 2;
*/
public java.lang.String getLocalPath() {
java.lang.Object ref = localPath_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref)
.toStringUtf8();
localPath_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string localPath = 2;
*/
public org.apache.hadoop.shaded.com.google.protobuf.ByteString
getLocalPathBytes() {
java.lang.Object ref = localPath_;
if (ref instanceof String) {
org.apache.hadoop.shaded.com.google.protobuf.ByteString b =
org.apache.hadoop.shaded.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
localPath_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
}
}
/**
* required string localPath = 2;
*/
public Builder setLocalPath(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
localPath_ = value;
onChanged();
return this;
}
/**
* required string localPath = 2;
*/
public Builder clearLocalPath() {
bitField0_ = (bitField0_ & ~0x00000002);
localPath_ = getDefaultInstance().getLocalPath();
onChanged();
return this;
}
/**
* required string localPath = 2;
*/
public Builder setLocalPathBytes(
org.apache.hadoop.shaded.com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
localPath_ = value;
onChanged();
return this;
}
// required string localMetaPath = 3;
private java.lang.Object localMetaPath_ = "";
/**
* required string localMetaPath = 3;
*/
public boolean hasLocalMetaPath() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required string localMetaPath = 3;
*/
public java.lang.String getLocalMetaPath() {
java.lang.Object ref = localMetaPath_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref)
.toStringUtf8();
localMetaPath_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string localMetaPath = 3;
*/
public org.apache.hadoop.shaded.com.google.protobuf.ByteString
getLocalMetaPathBytes() {
java.lang.Object ref = localMetaPath_;
if (ref instanceof String) {
org.apache.hadoop.shaded.com.google.protobuf.ByteString b =
org.apache.hadoop.shaded.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
localMetaPath_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
}
}
/**
* required string localMetaPath = 3;
*/
public Builder setLocalMetaPath(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
localMetaPath_ = value;
onChanged();
return this;
}
/**
* required string localMetaPath = 3;
*/
public Builder clearLocalMetaPath() {
bitField0_ = (bitField0_ & ~0x00000004);
localMetaPath_ = getDefaultInstance().getLocalMetaPath();
onChanged();
return this;
}
/**
* required string localMetaPath = 3;
*/
public Builder setLocalMetaPathBytes(
org.apache.hadoop.shaded.com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
localMetaPath_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetBlockLocalPathInfoResponseProto)
}
static {
defaultInstance = new GetBlockLocalPathInfoResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetBlockLocalPathInfoResponseProto)
}
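// Hedged usage sketch (hand-written, not protoc output): shows how a caller might
// read the short-circuit local-read paths out of a parsed
// GetBlockLocalPathInfoResponseProto via the generated accessors above.
// The helper name below is illustrative only.
private static java.lang.String describeBlockLocalPathInfo(
GetBlockLocalPathInfoResponseProto response) {
// localPath points at the block data file, localMetaPath at its checksum file.
return "block file: " + response.getLocalPath()
+ ", meta file: " + response.getLocalMetaPath();
}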
public interface ShutdownDatanodeRequestProtoOrBuilder
extends org.apache.hadoop.shaded.com.google.protobuf.MessageOrBuilder {
// required bool forUpgrade = 1;
/**
* required bool forUpgrade = 1;
*/
boolean hasForUpgrade();
/**
* required bool forUpgrade = 1;
*/
boolean getForUpgrade();
}
/**
* Protobuf type {@code hadoop.hdfs.ShutdownDatanodeRequestProto}
*
*
**
* forUpgrade - if true, clients are advised to wait for restart and quick
* upgrade restart is instrumented. Otherwise, datanode does
* the regular shutdown.
*
*/
public static final class ShutdownDatanodeRequestProto extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage
implements ShutdownDatanodeRequestProtoOrBuilder {
// Use ShutdownDatanodeRequestProto.newBuilder() to construct.
private ShutdownDatanodeRequestProto(org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ShutdownDatanodeRequestProto(boolean noInit) { this.unknownFields = org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ShutdownDatanodeRequestProto defaultInstance;
public static ShutdownDatanodeRequestProto getDefaultInstance() {
return defaultInstance;
}
public ShutdownDatanodeRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ShutdownDatanodeRequestProto(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
forUpgrade_ = input.readBool();
break;
}
}
}
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto.Builder.class);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Parser<ShutdownDatanodeRequestProto> PARSER =
new org.apache.hadoop.shaded.com.google.protobuf.AbstractParser<ShutdownDatanodeRequestProto>() {
public ShutdownDatanodeRequestProto parsePartialFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return new ShutdownDatanodeRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public org.apache.hadoop.shaded.com.google.protobuf.Parser<ShutdownDatanodeRequestProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required bool forUpgrade = 1;
public static final int FORUPGRADE_FIELD_NUMBER = 1;
private boolean forUpgrade_;
/**
* required bool forUpgrade = 1;
*/
public boolean hasForUpgrade() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required bool forUpgrade = 1;
*/
public boolean getForUpgrade() {
return forUpgrade_;
}
private void initFields() {
forUpgrade_ = false;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasForUpgrade()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBool(1, forUpgrade_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream
.computeBoolSize(1, forUpgrade_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto) obj;
boolean result = true;
result = result && (hasForUpgrade() == other.hasForUpgrade());
if (hasForUpgrade()) {
result = result && (getForUpgrade()
== other.getForUpgrade());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasForUpgrade()) {
hash = (37 * hash) + FORUPGRADE_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getForUpgrade());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ShutdownDatanodeRequestProto}
*
*
**
* forUpgrade - if true, clients are advised to wait for restart and quick
* upgrade restart is instrumented. Otherwise, datanode does
* the regular shutdown.
*
*/
public static final class Builder extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProtoOrBuilder {
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
forUpgrade_ = false;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_descriptor;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto.getDefaultInstance();
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.forUpgrade_ = forUpgrade_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(org.apache.hadoop.shaded.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto.getDefaultInstance()) return this;
if (other.hasForUpgrade()) {
setForUpgrade(other.getForUpgrade());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasForUpgrade()) {
return false;
}
return true;
}
public Builder mergeFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required bool forUpgrade = 1;
private boolean forUpgrade_ ;
/**
* required bool forUpgrade = 1;
*/
public boolean hasForUpgrade() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required bool forUpgrade = 1;
*/
public boolean getForUpgrade() {
return forUpgrade_;
}
/**
* required bool forUpgrade = 1;
*/
public Builder setForUpgrade(boolean value) {
bitField0_ |= 0x00000001;
forUpgrade_ = value;
onChanged();
return this;
}
/**
* required bool forUpgrade = 1;
*/
public Builder clearForUpgrade() {
bitField0_ = (bitField0_ & ~0x00000001);
forUpgrade_ = false;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ShutdownDatanodeRequestProto)
}
static {
defaultInstance = new ShutdownDatanodeRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ShutdownDatanodeRequestProto)
}
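// Hedged usage sketch (hand-written, not protoc output): builds a
// ShutdownDatanodeRequestProto with its single required field. Passing true asks
// for the upgrade-style restart described in the message comment above; false
// asks for a regular shutdown. The helper name is illustrative only.
private static ShutdownDatanodeRequestProto exampleShutdownRequest(boolean forUpgrade) {
return ShutdownDatanodeRequestProto.newBuilder()
.setForUpgrade(forUpgrade)   // required bool forUpgrade = 1
.build();
}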
public interface ShutdownDatanodeResponseProtoOrBuilder
extends org.apache.hadoop.shaded.com.google.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.hdfs.ShutdownDatanodeResponseProto}
*/
public static final class ShutdownDatanodeResponseProto extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage
implements ShutdownDatanodeResponseProtoOrBuilder {
// Use ShutdownDatanodeResponseProto.newBuilder() to construct.
private ShutdownDatanodeResponseProto(org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ShutdownDatanodeResponseProto(boolean noInit) { this.unknownFields = org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ShutdownDatanodeResponseProto defaultInstance;
public static ShutdownDatanodeResponseProto getDefaultInstance() {
return defaultInstance;
}
public ShutdownDatanodeResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ShutdownDatanodeResponseProto(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
initFields();
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.Builder.class);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Parser<ShutdownDatanodeResponseProto> PARSER =
new org.apache.hadoop.shaded.com.google.protobuf.AbstractParser<ShutdownDatanodeResponseProto>() {
public ShutdownDatanodeResponseProto parsePartialFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return new ShutdownDatanodeResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public org.apache.hadoop.shaded.com.google.protobuf.Parser<ShutdownDatanodeResponseProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ShutdownDatanodeResponseProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProtoOrBuilder {
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_descriptor;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.getDefaultInstance();
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(org.apache.hadoop.shaded.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ShutdownDatanodeResponseProto)
}
static {
defaultInstance = new ShutdownDatanodeResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ShutdownDatanodeResponseProto)
}
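// Hedged round-trip sketch (hand-written, not protoc output): serializes the empty
// ShutdownDatanodeResponseProto and parses it back through the generated parseFrom
// overloads above. The helper name is illustrative only.
private static ShutdownDatanodeResponseProto exampleShutdownResponseRoundTrip()
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
byte[] wire = ShutdownDatanodeResponseProto.getDefaultInstance().toByteArray();
return ShutdownDatanodeResponseProto.parseFrom(wire);
}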
public interface EvictWritersRequestProtoOrBuilder
extends org.apache.hadoop.shaded.com.google.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.hdfs.EvictWritersRequestProto}
*
*
** Tell datanode to evict active clients that are writing
*
*/
public static final class EvictWritersRequestProto extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage
implements EvictWritersRequestProtoOrBuilder {
// Use EvictWritersRequestProto.newBuilder() to construct.
private EvictWritersRequestProto(org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private EvictWritersRequestProto(boolean noInit) { this.unknownFields = org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final EvictWritersRequestProto defaultInstance;
public static EvictWritersRequestProto getDefaultInstance() {
return defaultInstance;
}
public EvictWritersRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private EvictWritersRequestProto(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
initFields();
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_EvictWritersRequestProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_EvictWritersRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto.Builder.class);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Parser<EvictWritersRequestProto> PARSER =
new org.apache.hadoop.shaded.com.google.protobuf.AbstractParser<EvictWritersRequestProto>() {
public EvictWritersRequestProto parsePartialFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return new EvictWritersRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public org.apache.hadoop.shaded.com.google.protobuf.Parser<EvictWritersRequestProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.EvictWritersRequestProto}
*
*
** Tell datanode to evict active clients that are writing
*
*/
public static final class Builder extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProtoOrBuilder {
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_EvictWritersRequestProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_EvictWritersRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_EvictWritersRequestProto_descriptor;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto.getDefaultInstance();
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(org.apache.hadoop.shaded.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.EvictWritersRequestProto)
}
static {
defaultInstance = new EvictWritersRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.EvictWritersRequestProto)
}
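// Hedged usage sketch (hand-written, not protoc output): EvictWritersRequestProto
// carries no fields, so an empty message built from the default builder is all a
// caller needs when asking a datanode to evict its active writers. The helper
// name is illustrative only.
private static EvictWritersRequestProto exampleEvictWritersRequest() {
return EvictWritersRequestProto.newBuilder().build();
}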
public interface EvictWritersResponseProtoOrBuilder
extends org.apache.hadoop.shaded.com.google.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.hdfs.EvictWritersResponseProto}
*/
public static final class EvictWritersResponseProto extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage
implements EvictWritersResponseProtoOrBuilder {
// Use EvictWritersResponseProto.newBuilder() to construct.
private EvictWritersResponseProto(org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private EvictWritersResponseProto(boolean noInit) { this.unknownFields = org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final EvictWritersResponseProto defaultInstance;
public static EvictWritersResponseProto getDefaultInstance() {
return defaultInstance;
}
public EvictWritersResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private EvictWritersResponseProto(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
initFields();
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_EvictWritersResponseProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_EvictWritersResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto.Builder.class);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Parser<EvictWritersResponseProto> PARSER =
new org.apache.hadoop.shaded.com.google.protobuf.AbstractParser<EvictWritersResponseProto>() {
public EvictWritersResponseProto parsePartialFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return new EvictWritersResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public org.apache.hadoop.shaded.com.google.protobuf.Parser<EvictWritersResponseProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.org.apache.hadoop.shaded.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.EvictWritersResponseProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProtoOrBuilder {
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_EvictWritersResponseProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_EvictWritersResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_EvictWritersResponseProto_descriptor;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto.getDefaultInstance();
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(org.apache.hadoop.shaded.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.EvictWritersResponseProto)
}
static {
defaultInstance = new EvictWritersResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.EvictWritersResponseProto)
}
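// Editorial usage sketch (not part of the generated API): a typical serialize/parse
// round trip for this empty acknowledgement message. The local variable names
// ("resp", "wire", "parsed") are illustrative assumptions, not names from this file.
//
//   EvictWritersResponseProto resp = EvictWritersResponseProto.newBuilder().build();
//   byte[] wire = resp.toByteArray();   // an empty message serializes to zero bytes
//   EvictWritersResponseProto parsed = EvictWritersResponseProto.parseFrom(wire);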
public interface GetDatanodeInfoRequestProtoOrBuilder
extends org.apache.hadoop.shaded.com.google.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.hdfs.GetDatanodeInfoRequestProto}
*
* Ping datanode for liveness and quick info
*/
public static final class GetDatanodeInfoRequestProto extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage
implements GetDatanodeInfoRequestProtoOrBuilder {
// Use GetDatanodeInfoRequestProto.newBuilder() to construct.
private GetDatanodeInfoRequestProto(org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private GetDatanodeInfoRequestProto(boolean noInit) { this.unknownFields = org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final GetDatanodeInfoRequestProto defaultInstance;
public static GetDatanodeInfoRequestProto getDefaultInstance() {
return defaultInstance;
}
public GetDatanodeInfoRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetDatanodeInfoRequestProto(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
initFields();
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto.Builder.class);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Parser<GetDatanodeInfoRequestProto> PARSER =
new org.apache.hadoop.shaded.com.google.protobuf.AbstractParser<GetDatanodeInfoRequestProto>() {
public GetDatanodeInfoRequestProto parsePartialFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return new GetDatanodeInfoRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public org.apache.hadoop.shaded.com.google.protobuf.Parser<GetDatanodeInfoRequestProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.GetDatanodeInfoRequestProto}
*
* Ping datanode for liveness and quick info
*/
public static final class Builder extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProtoOrBuilder {
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_descriptor;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto.getDefaultInstance();
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(org.apache.hadoop.shaded.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetDatanodeInfoRequestProto)
}
static {
defaultInstance = new GetDatanodeInfoRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetDatanodeInfoRequestProto)
}
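// Editorial usage sketch (not generated code): the request carries no fields, so callers
// simply build the default instance and stream it. The "out" and "in" stream variables
// below are assumed to exist in caller code; they are not defined in this file.
//
//   GetDatanodeInfoRequestProto req = GetDatanodeInfoRequestProto.newBuilder().build();
//   req.writeDelimitedTo(out);                               // java.io.OutputStream out
//   GetDatanodeInfoRequestProto roundTrip =
//       GetDatanodeInfoRequestProto.parseDelimitedFrom(in);  // java.io.InputStream in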
public interface GetDatanodeInfoResponseProtoOrBuilder
extends org.apache.hadoop.shaded.com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
boolean hasLocalInfo();
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto getLocalInfo();
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProtoOrBuilder getLocalInfoOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.GetDatanodeInfoResponseProto}
*/
public static final class GetDatanodeInfoResponseProto extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage
implements GetDatanodeInfoResponseProtoOrBuilder {
// Use GetDatanodeInfoResponseProto.newBuilder() to construct.
private GetDatanodeInfoResponseProto(org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private GetDatanodeInfoResponseProto(boolean noInit) { this.unknownFields = org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final GetDatanodeInfoResponseProto defaultInstance;
public static GetDatanodeInfoResponseProto getDefaultInstance() {
return defaultInstance;
}
public GetDatanodeInfoResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetDatanodeInfoResponseProto(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = localInfo_.toBuilder();
}
localInfo_ = input.readMessage(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(localInfo_);
localInfo_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
}
}
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.Builder.class);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Parser<GetDatanodeInfoResponseProto> PARSER =
new org.apache.hadoop.shaded.com.google.protobuf.AbstractParser<GetDatanodeInfoResponseProto>() {
public GetDatanodeInfoResponseProto parsePartialFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return new GetDatanodeInfoResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public org.apache.hadoop.shaded.com.google.protobuf.Parser<GetDatanodeInfoResponseProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
public static final int LOCALINFO_FIELD_NUMBER = 1;
private org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto localInfo_;
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public boolean hasLocalInfo() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto getLocalInfo() {
return localInfo_;
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProtoOrBuilder getLocalInfoOrBuilder() {
return localInfo_;
}
private void initFields() {
localInfo_ = org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasLocalInfo()) {
memoizedIsInitialized = 0;
return false;
}
if (!getLocalInfo().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, localInfo_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream
.computeMessageSize(1, localInfo_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto) obj;
boolean result = true;
result = result && (hasLocalInfo() == other.hasLocalInfo());
if (hasLocalInfo()) {
result = result && getLocalInfo()
.equals(other.getLocalInfo());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasLocalInfo()) {
hash = (37 * hash) + LOCALINFO_FIELD_NUMBER;
hash = (53 * hash) + getLocalInfo().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.GetDatanodeInfoResponseProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProtoOrBuilder {
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getLocalInfoFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (localInfoBuilder_ == null) {
localInfo_ = org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.getDefaultInstance();
} else {
localInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_descriptor;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.getDefaultInstance();
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (localInfoBuilder_ == null) {
result.localInfo_ = localInfo_;
} else {
result.localInfo_ = localInfoBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(org.apache.hadoop.shaded.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.getDefaultInstance()) return this;
if (other.hasLocalInfo()) {
mergeLocalInfo(other.getLocalInfo());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasLocalInfo()) {
return false;
}
if (!getLocalInfo().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
private org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto localInfo_ = org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.getDefaultInstance();
private org.apache.hadoop.shaded.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProtoOrBuilder> localInfoBuilder_;
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public boolean hasLocalInfo() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto getLocalInfo() {
if (localInfoBuilder_ == null) {
return localInfo_;
} else {
return localInfoBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public Builder setLocalInfo(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto value) {
if (localInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
localInfo_ = value;
onChanged();
} else {
localInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public Builder setLocalInfo(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder builderForValue) {
if (localInfoBuilder_ == null) {
localInfo_ = builderForValue.build();
onChanged();
} else {
localInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public Builder mergeLocalInfo(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto value) {
if (localInfoBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
localInfo_ != org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.getDefaultInstance()) {
localInfo_ =
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.newBuilder(localInfo_).mergeFrom(value).buildPartial();
} else {
localInfo_ = value;
}
onChanged();
} else {
localInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public Builder clearLocalInfo() {
if (localInfoBuilder_ == null) {
localInfo_ = org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.getDefaultInstance();
onChanged();
} else {
localInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder getLocalInfoBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getLocalInfoFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProtoOrBuilder getLocalInfoOrBuilder() {
if (localInfoBuilder_ != null) {
return localInfoBuilder_.getMessageOrBuilder();
} else {
return localInfo_;
}
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
private org.apache.hadoop.shaded.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProtoOrBuilder>
getLocalInfoFieldBuilder() {
if (localInfoBuilder_ == null) {
localInfoBuilder_ = new org.apache.hadoop.shaded.com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProtoOrBuilder>(
localInfo_,
getParentForChildren(),
isClean());
localInfo_ = null;
}
return localInfoBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetDatanodeInfoResponseProto)
}
static {
defaultInstance = new GetDatanodeInfoResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetDatanodeInfoResponseProto)
}
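// Editorial usage sketch (not generated code) for the required localInfo field. "info" is
// assumed to be a fully populated HdfsProtos.DatanodeLocalInfoProto built elsewhere; the
// short name HdfsProtos stands for the shaded class referenced throughout this file.
//
//   GetDatanodeInfoResponseProto.Builder b = GetDatanodeInfoResponseProto.newBuilder();
//   b.setLocalInfo(info);               // required; build() throws if it is never set
//   GetDatanodeInfoResponseProto resp = b.build();
//   if (resp.hasLocalInfo()) {
//     HdfsProtos.DatanodeLocalInfoProto li = resp.getLocalInfo();
//   }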
public interface GetVolumeReportRequestProtoOrBuilder
extends org.apache.hadoop.shaded.com.google.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.hdfs.GetVolumeReportRequestProto}
*/
public static final class GetVolumeReportRequestProto extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage
implements GetVolumeReportRequestProtoOrBuilder {
// Use GetVolumeReportRequestProto.newBuilder() to construct.
private GetVolumeReportRequestProto(org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private GetVolumeReportRequestProto(boolean noInit) { this.unknownFields = org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final GetVolumeReportRequestProto defaultInstance;
public static GetVolumeReportRequestProto getDefaultInstance() {
return defaultInstance;
}
public GetVolumeReportRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetVolumeReportRequestProto(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
initFields();
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetVolumeReportRequestProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetVolumeReportRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto.Builder.class);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Parser<GetVolumeReportRequestProto> PARSER =
new org.apache.hadoop.shaded.com.google.protobuf.AbstractParser<GetVolumeReportRequestProto>() {
public GetVolumeReportRequestProto parsePartialFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return new GetVolumeReportRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public org.apache.hadoop.shaded.com.google.protobuf.Parser<GetVolumeReportRequestProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.GetVolumeReportRequestProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProtoOrBuilder {
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetVolumeReportRequestProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetVolumeReportRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetVolumeReportRequestProto_descriptor;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto.getDefaultInstance();
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(org.apache.hadoop.shaded.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetVolumeReportRequestProto)
}
static {
defaultInstance = new GetVolumeReportRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetVolumeReportRequestProto)
}
public interface GetVolumeReportResponseProtoOrBuilder
extends org.apache.hadoop.shaded.com.google.protobuf.MessageOrBuilder {
// repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
java.util.List<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto>
getVolumeInfoList();
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto getVolumeInfo(int index);
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
int getVolumeInfoCount();
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
java.util.List<? extends org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProtoOrBuilder>
getVolumeInfoOrBuilderList();
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProtoOrBuilder getVolumeInfoOrBuilder(
int index);
}
/**
* Protobuf type {@code hadoop.hdfs.GetVolumeReportResponseProto}
*/
public static final class GetVolumeReportResponseProto extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage
implements GetVolumeReportResponseProtoOrBuilder {
// Use GetVolumeReportResponseProto.newBuilder() to construct.
private GetVolumeReportResponseProto(org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private GetVolumeReportResponseProto(boolean noInit) { this.unknownFields = org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final GetVolumeReportResponseProto defaultInstance;
public static GetVolumeReportResponseProto getDefaultInstance() {
return defaultInstance;
}
public GetVolumeReportResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
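// Parses a GetVolumeReportResponseProto from the wire format: each tag encodes
// (field_number << 3) | wire_type, so tag 10 below is field 1 (volumeInfo, a
// length-delimited sub-message) and tag 0 signals end of input.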
private GetVolumeReportResponseProto(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
volumeInfo_ = new java.util.ArrayList<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto>();
mutable_bitField0_ |= 0x00000001;
}
volumeInfo_.add(input.readMessage(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.PARSER, extensionRegistry));
break;
}
}
}
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
volumeInfo_ = java.util.Collections.unmodifiableList(volumeInfo_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetVolumeReportResponseProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetVolumeReportResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto.Builder.class);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Parser<GetVolumeReportResponseProto> PARSER =
new org.apache.hadoop.shaded.com.google.protobuf.AbstractParser<GetVolumeReportResponseProto>() {
public GetVolumeReportResponseProto parsePartialFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return new GetVolumeReportResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public org.apache.hadoop.shaded.com.google.protobuf.Parser<GetVolumeReportResponseProto> getParserForType() {
return PARSER;
}
// repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
public static final int VOLUMEINFO_FIELD_NUMBER = 1;
private java.util.List<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto> volumeInfo_;
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public java.util.List<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto> getVolumeInfoList() {
return volumeInfo_;
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public java.util.List<? extends org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProtoOrBuilder>
getVolumeInfoOrBuilderList() {
return volumeInfo_;
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public int getVolumeInfoCount() {
return volumeInfo_.size();
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto getVolumeInfo(int index) {
return volumeInfo_.get(index);
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProtoOrBuilder getVolumeInfoOrBuilder(
int index) {
return volumeInfo_.get(index);
}
private void initFields() {
volumeInfo_ = java.util.Collections.emptyList();
}
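// isInitialized() below is memoized: -1 means not yet computed, 0 means some nested
// volumeInfo entry is missing required fields, 1 means fully initialized.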
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
for (int i = 0; i < getVolumeInfoCount(); i++) {
if (!getVolumeInfo(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < volumeInfo_.size(); i++) {
output.writeMessage(1, volumeInfo_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
for (int i = 0; i < volumeInfo_.size(); i++) {
size += org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream
.computeMessageSize(1, volumeInfo_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto) obj;
boolean result = true;
result = result && getVolumeInfoList()
.equals(other.getVolumeInfoList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (getVolumeInfoCount() > 0) {
hash = (37 * hash) + VOLUMEINFO_FIELD_NUMBER;
hash = (53 * hash) + getVolumeInfoList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.GetVolumeReportResponseProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProtoOrBuilder {
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetVolumeReportResponseProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetVolumeReportResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getVolumeInfoFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (volumeInfoBuilder_ == null) {
volumeInfo_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
} else {
volumeInfoBuilder_.clear();
}
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetVolumeReportResponseProto_descriptor;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto.getDefaultInstance();
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto(this);
int from_bitField0_ = bitField0_;
if (volumeInfoBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001)) {
volumeInfo_ = java.util.Collections.unmodifiableList(volumeInfo_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.volumeInfo_ = volumeInfo_;
} else {
result.volumeInfo_ = volumeInfoBuilder_.build();
}
onBuilt();
return result;
}
public Builder mergeFrom(org.apache.hadoop.shaded.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto.getDefaultInstance()) return this;
if (volumeInfoBuilder_ == null) {
if (!other.volumeInfo_.isEmpty()) {
if (volumeInfo_.isEmpty()) {
volumeInfo_ = other.volumeInfo_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureVolumeInfoIsMutable();
volumeInfo_.addAll(other.volumeInfo_);
}
onChanged();
}
} else {
if (!other.volumeInfo_.isEmpty()) {
if (volumeInfoBuilder_.isEmpty()) {
volumeInfoBuilder_.dispose();
volumeInfoBuilder_ = null;
volumeInfo_ = other.volumeInfo_;
bitField0_ = (bitField0_ & ~0x00000001);
volumeInfoBuilder_ =
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getVolumeInfoFieldBuilder() : null;
} else {
volumeInfoBuilder_.addAllMessages(other.volumeInfo_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
for (int i = 0; i < getVolumeInfoCount(); i++) {
if (!getVolumeInfo(i).isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
private java.util.List<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto> volumeInfo_ =
java.util.Collections.emptyList();
private void ensureVolumeInfoIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
volumeInfo_ = new java.util.ArrayList<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto>(volumeInfo_);
bitField0_ |= 0x00000001;
}
}
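// The repeated volumeInfo field is backed either by the plain volumeInfo_ list or,
// once a nested-builder accessor is first used, by the RepeatedFieldBuilder below;
// bit 0 of bitField0_ records whether volumeInfo_ is a private mutable copy.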
private org.apache.hadoop.shaded.com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProtoOrBuilder> volumeInfoBuilder_;
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public java.util.List<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto> getVolumeInfoList() {
if (volumeInfoBuilder_ == null) {
return java.util.Collections.unmodifiableList(volumeInfo_);
} else {
return volumeInfoBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public int getVolumeInfoCount() {
if (volumeInfoBuilder_ == null) {
return volumeInfo_.size();
} else {
return volumeInfoBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto getVolumeInfo(int index) {
if (volumeInfoBuilder_ == null) {
return volumeInfo_.get(index);
} else {
return volumeInfoBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public Builder setVolumeInfo(
int index, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto value) {
if (volumeInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureVolumeInfoIsMutable();
volumeInfo_.set(index, value);
onChanged();
} else {
volumeInfoBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public Builder setVolumeInfo(
int index, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.Builder builderForValue) {
if (volumeInfoBuilder_ == null) {
ensureVolumeInfoIsMutable();
volumeInfo_.set(index, builderForValue.build());
onChanged();
} else {
volumeInfoBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public Builder addVolumeInfo(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto value) {
if (volumeInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureVolumeInfoIsMutable();
volumeInfo_.add(value);
onChanged();
} else {
volumeInfoBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public Builder addVolumeInfo(
int index, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto value) {
if (volumeInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureVolumeInfoIsMutable();
volumeInfo_.add(index, value);
onChanged();
} else {
volumeInfoBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public Builder addVolumeInfo(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.Builder builderForValue) {
if (volumeInfoBuilder_ == null) {
ensureVolumeInfoIsMutable();
volumeInfo_.add(builderForValue.build());
onChanged();
} else {
volumeInfoBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public Builder addVolumeInfo(
int index, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.Builder builderForValue) {
if (volumeInfoBuilder_ == null) {
ensureVolumeInfoIsMutable();
volumeInfo_.add(index, builderForValue.build());
onChanged();
} else {
volumeInfoBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public Builder addAllVolumeInfo(
java.lang.Iterable<? extends org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto> values) {
if (volumeInfoBuilder_ == null) {
ensureVolumeInfoIsMutable();
super.addAll(values, volumeInfo_);
onChanged();
} else {
volumeInfoBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public Builder clearVolumeInfo() {
if (volumeInfoBuilder_ == null) {
volumeInfo_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
} else {
volumeInfoBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public Builder removeVolumeInfo(int index) {
if (volumeInfoBuilder_ == null) {
ensureVolumeInfoIsMutable();
volumeInfo_.remove(index);
onChanged();
} else {
volumeInfoBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.Builder getVolumeInfoBuilder(
int index) {
return getVolumeInfoFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProtoOrBuilder getVolumeInfoOrBuilder(
int index) {
if (volumeInfoBuilder_ == null) {
return volumeInfo_.get(index); } else {
return volumeInfoBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public java.util.List<? extends org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProtoOrBuilder>
getVolumeInfoOrBuilderList() {
if (volumeInfoBuilder_ != null) {
return volumeInfoBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(volumeInfo_);
}
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.Builder addVolumeInfoBuilder() {
return getVolumeInfoFieldBuilder().addBuilder(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.Builder addVolumeInfoBuilder(
int index) {
return getVolumeInfoFieldBuilder().addBuilder(
index, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public java.util.List<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.Builder>
getVolumeInfoBuilderList() {
return getVolumeInfoFieldBuilder().getBuilderList();
}
private org.apache.hadoop.shaded.com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProtoOrBuilder>
getVolumeInfoFieldBuilder() {
if (volumeInfoBuilder_ == null) {
volumeInfoBuilder_ = new org.apache.hadoop.shaded.com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProtoOrBuilder>(
volumeInfo_,
((bitField0_ & 0x00000001) == 0x00000001),
getParentForChildren(),
isClean());
volumeInfo_ = null;
}
return volumeInfoBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetVolumeReportResponseProto)
}
static {
defaultInstance = new GetVolumeReportResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetVolumeReportResponseProto)
}
public interface TriggerBlockReportRequestProtoOrBuilder
extends org.apache.hadoop.shaded.com.google.protobuf.MessageOrBuilder {
// required bool incremental = 1;
/**
* required bool incremental = 1;
*/
boolean hasIncremental();
/**
* required bool incremental = 1;
*/
boolean getIncremental();
// optional string nnAddress = 2;
/**
* optional string nnAddress = 2;
*/
boolean hasNnAddress();
/**
* optional string nnAddress = 2;
*/
java.lang.String getNnAddress();
/**
* optional string nnAddress = 2;
*/
org.apache.hadoop.shaded.com.google.protobuf.ByteString
getNnAddressBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.TriggerBlockReportRequestProto}
*/
public static final class TriggerBlockReportRequestProto extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage
implements TriggerBlockReportRequestProtoOrBuilder {
// Use TriggerBlockReportRequestProto.newBuilder() to construct.
private TriggerBlockReportRequestProto(org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private TriggerBlockReportRequestProto(boolean noInit) { this.unknownFields = org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final TriggerBlockReportRequestProto defaultInstance;
public static TriggerBlockReportRequestProto getDefaultInstance() {
return defaultInstance;
}
public TriggerBlockReportRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private TriggerBlockReportRequestProto(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
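// tag 8 = field 1 (incremental, bool, varint); tag 18 = field 2 (nnAddress, string, length-delimited)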
case 8: {
bitField0_ |= 0x00000001;
incremental_ = input.readBool();
break;
}
case 18: {
bitField0_ |= 0x00000002;
nnAddress_ = input.readBytes();
break;
}
}
}
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto.Builder.class);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Parser<TriggerBlockReportRequestProto> PARSER =
new org.apache.hadoop.shaded.com.google.protobuf.AbstractParser<TriggerBlockReportRequestProto>() {
public TriggerBlockReportRequestProto parsePartialFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return new TriggerBlockReportRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public org.apache.hadoop.shaded.com.google.protobuf.Parser<TriggerBlockReportRequestProto> getParserForType() {
return PARSER;
}
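// bitField0_ records which fields were present on the wire: bit 0 = incremental, bit 1 = nnAddress.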
private int bitField0_;
// required bool incremental = 1;
public static final int INCREMENTAL_FIELD_NUMBER = 1;
private boolean incremental_;
/**
* required bool incremental = 1;
*/
public boolean hasIncremental() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required bool incremental = 1;
*/
public boolean getIncremental() {
return incremental_;
}
// optional string nnAddress = 2;
public static final int NNADDRESS_FIELD_NUMBER = 2;
private java.lang.Object nnAddress_;
/**
* optional string nnAddress = 2;
*/
public boolean hasNnAddress() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional string nnAddress = 2;
*/
public java.lang.String getNnAddress() {
java.lang.Object ref = nnAddress_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.shaded.com.google.protobuf.ByteString bs =
(org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
nnAddress_ = s;
}
return s;
}
}
/**
* optional string nnAddress = 2;
*/
public org.apache.hadoop.shaded.com.google.protobuf.ByteString
getNnAddressBytes() {
java.lang.Object ref = nnAddress_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.shaded.com.google.protobuf.ByteString b =
org.apache.hadoop.shaded.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
nnAddress_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
incremental_ = false;
nnAddress_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasIncremental()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBool(1, incremental_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getNnAddressBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream
.computeBoolSize(1, incremental_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getNnAddressBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto) obj;
boolean result = true;
result = result && (hasIncremental() == other.hasIncremental());
if (hasIncremental()) {
result = result && (getIncremental()
== other.getIncremental());
}
result = result && (hasNnAddress() == other.hasNnAddress());
if (hasNnAddress()) {
result = result && getNnAddress()
.equals(other.getNnAddress());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasIncremental()) {
hash = (37 * hash) + INCREMENTAL_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getIncremental());
}
if (hasNnAddress()) {
hash = (37 * hash) + NNADDRESS_FIELD_NUMBER;
hash = (53 * hash) + getNnAddress().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.TriggerBlockReportRequestProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProtoOrBuilder {
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
incremental_ = false;
bitField0_ = (bitField0_ & ~0x00000001);
nnAddress_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_descriptor;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto.getDefaultInstance();
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.incremental_ = incremental_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.nnAddress_ = nnAddress_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(org.apache.hadoop.shaded.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto.getDefaultInstance()) return this;
if (other.hasIncremental()) {
setIncremental(other.getIncremental());
}
if (other.hasNnAddress()) {
bitField0_ |= 0x00000002;
nnAddress_ = other.nnAddress_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasIncremental()) {
return false;
}
return true;
}
public Builder mergeFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required bool incremental = 1;
private boolean incremental_ ;
/**
* required bool incremental = 1;
*/
public boolean hasIncremental() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required bool incremental = 1;
*/
public boolean getIncremental() {
return incremental_;
}
/**
* required bool incremental = 1;
*/
public Builder setIncremental(boolean value) {
bitField0_ |= 0x00000001;
incremental_ = value;
onChanged();
return this;
}
/**
* required bool incremental = 1;
*/
public Builder clearIncremental() {
bitField0_ = (bitField0_ & ~0x00000001);
incremental_ = false;
onChanged();
return this;
}
// optional string nnAddress = 2;
private java.lang.Object nnAddress_ = "";
/**
* optional string nnAddress = 2;
*/
public boolean hasNnAddress() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional string nnAddress = 2;
*/
public java.lang.String getNnAddress() {
java.lang.Object ref = nnAddress_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref)
.toStringUtf8();
nnAddress_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string nnAddress = 2;
*/
public org.apache.hadoop.shaded.com.google.protobuf.ByteString
getNnAddressBytes() {
java.lang.Object ref = nnAddress_;
if (ref instanceof String) {
org.apache.hadoop.shaded.com.google.protobuf.ByteString b =
org.apache.hadoop.shaded.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
nnAddress_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
}
}
/**
* optional string nnAddress = 2;
*/
public Builder setNnAddress(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
nnAddress_ = value;
onChanged();
return this;
}
/**
* optional string nnAddress = 2;
*/
public Builder clearNnAddress() {
bitField0_ = (bitField0_ & ~0x00000002);
nnAddress_ = getDefaultInstance().getNnAddress();
onChanged();
return this;
}
/**
* optional string nnAddress = 2;
*/
public Builder setNnAddressBytes(
org.apache.hadoop.shaded.com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
nnAddress_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.TriggerBlockReportRequestProto)
}
static {
defaultInstance = new TriggerBlockReportRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.TriggerBlockReportRequestProto)
}
public interface TriggerBlockReportResponseProtoOrBuilder
extends org.apache.hadoop.shaded.com.google.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.hdfs.TriggerBlockReportResponseProto}
*/
public static final class TriggerBlockReportResponseProto extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage
implements TriggerBlockReportResponseProtoOrBuilder {
// Use TriggerBlockReportResponseProto.newBuilder() to construct.
private TriggerBlockReportResponseProto(org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private TriggerBlockReportResponseProto(boolean noInit) { this.unknownFields = org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final TriggerBlockReportResponseProto defaultInstance;
public static TriggerBlockReportResponseProto getDefaultInstance() {
return defaultInstance;
}
public TriggerBlockReportResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private TriggerBlockReportResponseProto(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
initFields();
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.Builder.class);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Parser<TriggerBlockReportResponseProto> PARSER =
new org.apache.hadoop.shaded.com.google.protobuf.AbstractParser<TriggerBlockReportResponseProto>() {
public TriggerBlockReportResponseProto parsePartialFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return new TriggerBlockReportResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public org.apache.hadoop.shaded.com.google.protobuf.Parser<TriggerBlockReportResponseProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.TriggerBlockReportResponseProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProtoOrBuilder {
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_descriptor;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.getDefaultInstance();
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(org.apache.hadoop.shaded.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.TriggerBlockReportResponseProto)
}
static {
defaultInstance = new TriggerBlockReportResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.TriggerBlockReportResponseProto)
}
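// Illustrative sketch, not generated code: TriggerBlockReportResponseProto has
// no fields, so the datanode's reply to a triggerBlockReport call is simply the
// empty message. The helper below is hypothetical and only exercises the
// builder/serialize/parse API that the generated class above already exposes.
private static TriggerBlockReportResponseProto exampleTriggerBlockReportResponseRoundTrip()
    throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
  // Build the (empty) response and serialize it to its wire form.
  byte[] wire = TriggerBlockReportResponseProto.newBuilder().build().toByteArray();
  // Parsing the bytes back yields an equivalent, still-empty message.
  return TriggerBlockReportResponseProto.parseFrom(wire);
}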
public interface GetBalancerBandwidthRequestProtoOrBuilder
extends org.apache.hadoop.shaded.com.google.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.hdfs.GetBalancerBandwidthRequestProto}
*/
public static final class GetBalancerBandwidthRequestProto extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage
implements GetBalancerBandwidthRequestProtoOrBuilder {
// Use GetBalancerBandwidthRequestProto.newBuilder() to construct.
private GetBalancerBandwidthRequestProto(org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private GetBalancerBandwidthRequestProto(boolean noInit) { this.unknownFields = org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final GetBalancerBandwidthRequestProto defaultInstance;
public static GetBalancerBandwidthRequestProto getDefaultInstance() {
return defaultInstance;
}
public GetBalancerBandwidthRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetBalancerBandwidthRequestProto(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
initFields();
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBalancerBandwidthRequestProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBalancerBandwidthRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto.Builder.class);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Parser<GetBalancerBandwidthRequestProto> PARSER =
new org.apache.hadoop.shaded.com.google.protobuf.AbstractParser<GetBalancerBandwidthRequestProto>() {
public GetBalancerBandwidthRequestProto parsePartialFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return new GetBalancerBandwidthRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public org.apache.hadoop.shaded.com.google.protobuf.Parser<GetBalancerBandwidthRequestProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.GetBalancerBandwidthRequestProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProtoOrBuilder {
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBalancerBandwidthRequestProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBalancerBandwidthRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBalancerBandwidthRequestProto_descriptor;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto.getDefaultInstance();
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(org.apache.hadoop.shaded.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetBalancerBandwidthRequestProto)
}
static {
defaultInstance = new GetBalancerBandwidthRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetBalancerBandwidthRequestProto)
}
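// Illustrative sketch, not generated code: GetBalancerBandwidthRequestProto
// carries no fields, so a client queries the datanode's balancer bandwidth by
// sending the empty message and reading the value from the response type
// declared below. The helper name is hypothetical; only the generated builder
// API above is assumed.
private static GetBalancerBandwidthRequestProto exampleGetBalancerBandwidthRequest() {
  // An empty request; getSerializedSize() is 0 when no fields are set.
  return GetBalancerBandwidthRequestProto.newBuilder().build();
}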
public interface GetBalancerBandwidthResponseProtoOrBuilder
extends org.apache.hadoop.shaded.com.google.protobuf.MessageOrBuilder {
// required uint64 bandwidth = 1;
/**
* required uint64 bandwidth = 1;
*/
boolean hasBandwidth();
/**
* required uint64 bandwidth = 1;
*/
long getBandwidth();
}
/**
* Protobuf type {@code hadoop.hdfs.GetBalancerBandwidthResponseProto}
*
*
**
* bandwidth - balancer bandwidth value of the datanode.
*
*/
public static final class GetBalancerBandwidthResponseProto extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage
implements GetBalancerBandwidthResponseProtoOrBuilder {
// Use GetBalancerBandwidthResponseProto.newBuilder() to construct.
private GetBalancerBandwidthResponseProto(org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private GetBalancerBandwidthResponseProto(boolean noInit) { this.unknownFields = org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final GetBalancerBandwidthResponseProto defaultInstance;
public static GetBalancerBandwidthResponseProto getDefaultInstance() {
return defaultInstance;
}
public GetBalancerBandwidthResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetBalancerBandwidthResponseProto(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
bandwidth_ = input.readUInt64();
break;
}
}
}
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBalancerBandwidthResponseProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBalancerBandwidthResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto.Builder.class);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Parser<GetBalancerBandwidthResponseProto> PARSER =
new org.apache.hadoop.shaded.com.google.protobuf.AbstractParser<GetBalancerBandwidthResponseProto>() {
public GetBalancerBandwidthResponseProto parsePartialFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return new GetBalancerBandwidthResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public org.apache.hadoop.shaded.com.google.protobuf.Parser<GetBalancerBandwidthResponseProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required uint64 bandwidth = 1;
public static final int BANDWIDTH_FIELD_NUMBER = 1;
private long bandwidth_;
/**
* required uint64 bandwidth = 1;
*/
public boolean hasBandwidth() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint64 bandwidth = 1;
*/
public long getBandwidth() {
return bandwidth_;
}
private void initFields() {
bandwidth_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasBandwidth()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt64(1, bandwidth_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream
.computeUInt64Size(1, bandwidth_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto) obj;
boolean result = true;
result = result && (hasBandwidth() == other.hasBandwidth());
if (hasBandwidth()) {
result = result && (getBandwidth()
== other.getBandwidth());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasBandwidth()) {
hash = (37 * hash) + BANDWIDTH_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getBandwidth());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.GetBalancerBandwidthResponseProto}
*
*
**
* bandwidth - balancer bandwidth value of the datanode.
*
*/
public static final class Builder extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProtoOrBuilder {
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBalancerBandwidthResponseProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBalancerBandwidthResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
bandwidth_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBalancerBandwidthResponseProto_descriptor;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto.getDefaultInstance();
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.bandwidth_ = bandwidth_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(org.apache.hadoop.shaded.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto.getDefaultInstance()) return this;
if (other.hasBandwidth()) {
setBandwidth(other.getBandwidth());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasBandwidth()) {
return false;
}
return true;
}
public Builder mergeFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required uint64 bandwidth = 1;
private long bandwidth_ ;
/**
* required uint64 bandwidth = 1;
*/
public boolean hasBandwidth() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint64 bandwidth = 1;
*/
public long getBandwidth() {
return bandwidth_;
}
/**
* required uint64 bandwidth = 1;
*/
public Builder setBandwidth(long value) {
bitField0_ |= 0x00000001;
bandwidth_ = value;
onChanged();
return this;
}
/**
* required uint64 bandwidth = 1;
*/
public Builder clearBandwidth() {
bitField0_ = (bitField0_ & ~0x00000001);
bandwidth_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetBalancerBandwidthResponseProto)
}
static {
defaultInstance = new GetBalancerBandwidthResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetBalancerBandwidthResponseProto)
}
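// Illustrative sketch, not generated code: the response carries one required
// uint64 field, the datanode's balancer bandwidth in bytes per second. The
// helper and the example value are hypothetical; only the generated builder
// and parse methods above are assumed.
private static GetBalancerBandwidthResponseProto exampleGetBalancerBandwidthResponseRoundTrip()
    throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
  GetBalancerBandwidthResponseProto original = GetBalancerBandwidthResponseProto.newBuilder()
      .setBandwidth(10L * 1024 * 1024) // example value: 10 MiB per second
      .build();
  // Round-trip through the wire format; the required field must survive intact.
  GetBalancerBandwidthResponseProto parsed =
      GetBalancerBandwidthResponseProto.parseFrom(original.toByteArray());
  assert parsed.getBandwidth() == original.getBandwidth();
  return parsed;
}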
public interface SubmitDiskBalancerPlanRequestProtoOrBuilder
extends org.apache.hadoop.shaded.com.google.protobuf.MessageOrBuilder {
// required string planID = 1;
/**
* required string planID = 1;
*
*
* A hash of the plan like SHA-1
*
*/
boolean hasPlanID();
/**
* required string planID = 1;
*
*
* A hash of the plan like SHA-1
*
*/
java.lang.String getPlanID();
/**
* required string planID = 1;
*
*
* A hash of the plan like SHA-1
*
*/
org.apache.hadoop.shaded.com.google.protobuf.ByteString
getPlanIDBytes();
// required string plan = 2;
/**
* required string plan = 2;
*
*
* Plan file data in Json format
*
*/
boolean hasPlan();
/**
* required string plan = 2;
*
*
* Plan file data in Json format
*
*/
java.lang.String getPlan();
/**
* required string plan = 2;
*
*
* Plan file data in Json format
*
*/
org.apache.hadoop.shaded.com.google.protobuf.ByteString
getPlanBytes();
// optional uint64 planVersion = 3;
/**
* optional uint64 planVersion = 3;
*
*
* Plan version number
*
*/
boolean hasPlanVersion();
/**
* optional uint64 planVersion = 3;
*
*
* Plan version number
*
*/
long getPlanVersion();
// optional bool ignoreDateCheck = 4;
/**
* optional bool ignoreDateCheck = 4;
*
*
* Ignore date checks on this plan.
*
*/
boolean hasIgnoreDateCheck();
/**
* optional bool ignoreDateCheck = 4;
*
*
* Ignore date checks on this plan.
*
*/
boolean getIgnoreDateCheck();
// required string planFile = 5;
/**
* required string planFile = 5;
*
*
* Plan file path
*
*/
boolean hasPlanFile();
/**
* required string planFile = 5;
*
*
* Plan file path
*
*/
java.lang.String getPlanFile();
/**
* required string planFile = 5;
*
*
* Plan file path
*
*/
org.apache.hadoop.shaded.com.google.protobuf.ByteString
getPlanFileBytes();
}
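// Illustrative sketch, not generated code: a disk-balancer submission names the
// plan by its hash (planID), carries the plan body as JSON (plan) and the plan
// file path (planFile); planVersion and ignoreDateCheck are optional. The helper
// and every literal below are hypothetical placeholders, and the setters used
// are the standard ones protoc generates for the fields declared in the
// interface above.
private static SubmitDiskBalancerPlanRequestProto exampleSubmitDiskBalancerPlanRequest() {
  return SubmitDiskBalancerPlanRequestProto.newBuilder()
      .setPlanID("plan-sha1-hash-placeholder")        // hash identifying the plan
      .setPlan("{}")                                  // plan body in JSON form
      .setPlanFile("/system/diskbalancer/plan.json")  // placeholder plan file path
      .build();
}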
/**
* Protobuf type {@code hadoop.hdfs.SubmitDiskBalancerPlanRequestProto}
*
*
**
* This message allows a client to submit a disk
* balancer plan to a data node.
*
*/
public static final class SubmitDiskBalancerPlanRequestProto extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage
implements SubmitDiskBalancerPlanRequestProtoOrBuilder {
// Use SubmitDiskBalancerPlanRequestProto.newBuilder() to construct.
private SubmitDiskBalancerPlanRequestProto(org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private SubmitDiskBalancerPlanRequestProto(boolean noInit) { this.unknownFields = org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final SubmitDiskBalancerPlanRequestProto defaultInstance;
public static SubmitDiskBalancerPlanRequestProto getDefaultInstance() {
return defaultInstance;
}
public SubmitDiskBalancerPlanRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private SubmitDiskBalancerPlanRequestProto(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
planID_ = input.readBytes();
break;
}
case 18: {
bitField0_ |= 0x00000002;
plan_ = input.readBytes();
break;
}
case 24: {
bitField0_ |= 0x00000004;
planVersion_ = input.readUInt64();
break;
}
case 32: {
bitField0_ |= 0x00000008;
ignoreDateCheck_ = input.readBool();
break;
}
case 42: {
bitField0_ |= 0x00000010;
planFile_ = input.readBytes();
break;
}
}
}
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_SubmitDiskBalancerPlanRequestProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_SubmitDiskBalancerPlanRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto.Builder.class);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Parser<SubmitDiskBalancerPlanRequestProto> PARSER =
new org.apache.hadoop.shaded.com.google.protobuf.AbstractParser<SubmitDiskBalancerPlanRequestProto>() {
public SubmitDiskBalancerPlanRequestProto parsePartialFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return new SubmitDiskBalancerPlanRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public org.apache.hadoop.shaded.com.google.protobuf.Parser<SubmitDiskBalancerPlanRequestProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required string planID = 1;
public static final int PLANID_FIELD_NUMBER = 1;
private java.lang.Object planID_;
/**
* required string planID = 1;
*
*
* A hash of the plan like SHA-1
*
*/
public boolean hasPlanID() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string planID = 1;
*
*
* A hash of the plan like SHA-1
*
*/
public java.lang.String getPlanID() {
java.lang.Object ref = planID_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.shaded.com.google.protobuf.ByteString bs =
(org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
planID_ = s;
}
return s;
}
}
/**
* required string planID = 1;
*
*
* A hash of the plan like SHA-1
*
*/
public org.apache.hadoop.shaded.com.google.protobuf.ByteString
getPlanIDBytes() {
java.lang.Object ref = planID_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.shaded.com.google.protobuf.ByteString b =
org.apache.hadoop.shaded.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
planID_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
}
}
// required string plan = 2;
public static final int PLAN_FIELD_NUMBER = 2;
private java.lang.Object plan_;
/**
* required string plan = 2;
*
*
* Plan file data in Json format
*
*/
public boolean hasPlan() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string plan = 2;
*
*
* Plan file data in Json format
*
*/
public java.lang.String getPlan() {
java.lang.Object ref = plan_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.shaded.com.google.protobuf.ByteString bs =
(org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
plan_ = s;
}
return s;
}
}
/**
* required string plan = 2;
*
*
* Plan file data in Json format
*
*/
public org.apache.hadoop.shaded.com.google.protobuf.ByteString
getPlanBytes() {
java.lang.Object ref = plan_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.shaded.com.google.protobuf.ByteString b =
org.apache.hadoop.shaded.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
plan_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
}
}
// optional uint64 planVersion = 3;
public static final int PLANVERSION_FIELD_NUMBER = 3;
private long planVersion_;
/**
* optional uint64 planVersion = 3;
*
*
* Plan version number
*
*/
public boolean hasPlanVersion() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional uint64 planVersion = 3;
*
*
* Plan version number
*
*/
public long getPlanVersion() {
return planVersion_;
}
// optional bool ignoreDateCheck = 4;
public static final int IGNOREDATECHECK_FIELD_NUMBER = 4;
private boolean ignoreDateCheck_;
/**
* optional bool ignoreDateCheck = 4;
*
*
* Ignore date checks on this plan.
*
*/
public boolean hasIgnoreDateCheck() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional bool ignoreDateCheck = 4;
*
*
* Ignore date checks on this plan.
*
*/
public boolean getIgnoreDateCheck() {
return ignoreDateCheck_;
}
// required string planFile = 5;
public static final int PLANFILE_FIELD_NUMBER = 5;
private java.lang.Object planFile_;
/**
* required string planFile = 5;
*
*
* Plan file path
*
*/
public boolean hasPlanFile() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required string planFile = 5;
*
*
* Plan file path
*
*/
public java.lang.String getPlanFile() {
java.lang.Object ref = planFile_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.shaded.com.google.protobuf.ByteString bs =
(org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
planFile_ = s;
}
return s;
}
}
/**
* required string planFile = 5;
*
*
* Plan file path
*
*/
public org.apache.hadoop.shaded.com.google.protobuf.ByteString
getPlanFileBytes() {
java.lang.Object ref = planFile_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.shaded.com.google.protobuf.ByteString b =
org.apache.hadoop.shaded.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
planFile_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
planID_ = "";
plan_ = "";
planVersion_ = 0L;
ignoreDateCheck_ = false;
planFile_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasPlanID()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasPlan()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasPlanFile()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getPlanIDBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getPlanBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt64(3, planVersion_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeBool(4, ignoreDateCheck_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeBytes(5, getPlanFileBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getPlanIDBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getPlanBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, planVersion_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream
.computeBoolSize(4, ignoreDateCheck_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream
.computeBytesSize(5, getPlanFileBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto) obj;
boolean result = true;
result = result && (hasPlanID() == other.hasPlanID());
if (hasPlanID()) {
result = result && getPlanID()
.equals(other.getPlanID());
}
result = result && (hasPlan() == other.hasPlan());
if (hasPlan()) {
result = result && getPlan()
.equals(other.getPlan());
}
result = result && (hasPlanVersion() == other.hasPlanVersion());
if (hasPlanVersion()) {
result = result && (getPlanVersion()
== other.getPlanVersion());
}
result = result && (hasIgnoreDateCheck() == other.hasIgnoreDateCheck());
if (hasIgnoreDateCheck()) {
result = result && (getIgnoreDateCheck()
== other.getIgnoreDateCheck());
}
result = result && (hasPlanFile() == other.hasPlanFile());
if (hasPlanFile()) {
result = result && getPlanFile()
.equals(other.getPlanFile());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasPlanID()) {
hash = (37 * hash) + PLANID_FIELD_NUMBER;
hash = (53 * hash) + getPlanID().hashCode();
}
if (hasPlan()) {
hash = (37 * hash) + PLAN_FIELD_NUMBER;
hash = (53 * hash) + getPlan().hashCode();
}
if (hasPlanVersion()) {
hash = (37 * hash) + PLANVERSION_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getPlanVersion());
}
if (hasIgnoreDateCheck()) {
hash = (37 * hash) + IGNOREDATECHECK_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getIgnoreDateCheck());
}
if (hasPlanFile()) {
hash = (37 * hash) + PLANFILE_FIELD_NUMBER;
hash = (53 * hash) + getPlanFile().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.SubmitDiskBalancerPlanRequestProto}
*
*
**
* This message allows a client to submit a disk
* balancer plan to a data node.
*
*/
public static final class Builder extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProtoOrBuilder {
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_SubmitDiskBalancerPlanRequestProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_SubmitDiskBalancerPlanRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
planID_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
plan_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
planVersion_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
ignoreDateCheck_ = false;
bitField0_ = (bitField0_ & ~0x00000008);
planFile_ = "";
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_SubmitDiskBalancerPlanRequestProto_descriptor;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto.getDefaultInstance();
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.planID_ = planID_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.plan_ = plan_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.planVersion_ = planVersion_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.ignoreDateCheck_ = ignoreDateCheck_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.planFile_ = planFile_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(org.apache.hadoop.shaded.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto.getDefaultInstance()) return this;
if (other.hasPlanID()) {
bitField0_ |= 0x00000001;
planID_ = other.planID_;
onChanged();
}
if (other.hasPlan()) {
bitField0_ |= 0x00000002;
plan_ = other.plan_;
onChanged();
}
if (other.hasPlanVersion()) {
setPlanVersion(other.getPlanVersion());
}
if (other.hasIgnoreDateCheck()) {
setIgnoreDateCheck(other.getIgnoreDateCheck());
}
if (other.hasPlanFile()) {
bitField0_ |= 0x00000010;
planFile_ = other.planFile_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasPlanID()) {
return false;
}
if (!hasPlan()) {
return false;
}
if (!hasPlanFile()) {
return false;
}
return true;
}
public Builder mergeFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string planID = 1;
private java.lang.Object planID_ = "";
/**
* required string planID = 1;
*
*
* A hash of the plan like SHA-1
*
*/
public boolean hasPlanID() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string planID = 1;
*
*
* A hash of the plan like SHA-1
*
*/
public java.lang.String getPlanID() {
java.lang.Object ref = planID_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref)
.toStringUtf8();
planID_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string planID = 1;
*
*
* A hash of the plan like SHA-1
*
*/
public org.apache.hadoop.shaded.com.google.protobuf.ByteString
getPlanIDBytes() {
java.lang.Object ref = planID_;
if (ref instanceof String) {
org.apache.hadoop.shaded.com.google.protobuf.ByteString b =
org.apache.hadoop.shaded.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
planID_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
}
}
/**
* required string planID = 1;
*
*
* A hash of the plan like SHA-1
*
*/
public Builder setPlanID(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
planID_ = value;
onChanged();
return this;
}
/**
* required string planID = 1;
*
*
* A hash of the plan like SHA-1
*
*/
public Builder clearPlanID() {
bitField0_ = (bitField0_ & ~0x00000001);
planID_ = getDefaultInstance().getPlanID();
onChanged();
return this;
}
/**
* required string planID = 1;
*
*
* A hash of the plan like SHA-1
*
*/
public Builder setPlanIDBytes(
org.apache.hadoop.shaded.com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
planID_ = value;
onChanged();
return this;
}
// required string plan = 2;
private java.lang.Object plan_ = "";
/**
* required string plan = 2;
*
*
* Plan file data in Json format
*
*/
public boolean hasPlan() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string plan = 2;
*
*
* Plan file data in Json format
*
*/
public java.lang.String getPlan() {
java.lang.Object ref = plan_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref)
.toStringUtf8();
plan_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string plan = 2;
*
*
* Plan file data in Json format
*
*/
public org.apache.hadoop.shaded.com.google.protobuf.ByteString
getPlanBytes() {
java.lang.Object ref = plan_;
if (ref instanceof String) {
org.apache.hadoop.shaded.com.google.protobuf.ByteString b =
org.apache.hadoop.shaded.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
plan_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
}
}
/**
* required string plan = 2;
*
*
* Plan file data in Json format
*
*/
public Builder setPlan(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
plan_ = value;
onChanged();
return this;
}
/**
* required string plan = 2;
*
*
* Plan file data in Json format
*
*/
public Builder clearPlan() {
bitField0_ = (bitField0_ & ~0x00000002);
plan_ = getDefaultInstance().getPlan();
onChanged();
return this;
}
/**
* required string plan = 2;
*
*
* Plan file data in Json format
*
*/
public Builder setPlanBytes(
org.apache.hadoop.shaded.com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
plan_ = value;
onChanged();
return this;
}
// optional uint64 planVersion = 3;
private long planVersion_ ;
/**
* optional uint64 planVersion = 3;
*
*
* Plan version number
*
*/
public boolean hasPlanVersion() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional uint64 planVersion = 3;
*
*
* Plan version number
*
*/
public long getPlanVersion() {
return planVersion_;
}
/**
* optional uint64 planVersion = 3;
*
*
* Plan version number
*
*/
public Builder setPlanVersion(long value) {
bitField0_ |= 0x00000004;
planVersion_ = value;
onChanged();
return this;
}
/**
* optional uint64 planVersion = 3;
*
*
* Plan version number
*
*/
public Builder clearPlanVersion() {
bitField0_ = (bitField0_ & ~0x00000004);
planVersion_ = 0L;
onChanged();
return this;
}
// optional bool ignoreDateCheck = 4;
private boolean ignoreDateCheck_ ;
/**
* optional bool ignoreDateCheck = 4;
*
*
* Ignore date checks on this plan.
*
*/
public boolean hasIgnoreDateCheck() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional bool ignoreDateCheck = 4;
*
*
* Ignore date checks on this plan.
*
*/
public boolean getIgnoreDateCheck() {
return ignoreDateCheck_;
}
/**
* optional bool ignoreDateCheck = 4;
*
*
* Ignore date checks on this plan.
*
*/
public Builder setIgnoreDateCheck(boolean value) {
bitField0_ |= 0x00000008;
ignoreDateCheck_ = value;
onChanged();
return this;
}
/**
* optional bool ignoreDateCheck = 4;
*
*
* Ignore date checks on this plan.
*
*/
public Builder clearIgnoreDateCheck() {
bitField0_ = (bitField0_ & ~0x00000008);
ignoreDateCheck_ = false;
onChanged();
return this;
}
// required string planFile = 5;
private java.lang.Object planFile_ = "";
/**
* required string planFile = 5;
*
*
* Plan file path
*
*/
public boolean hasPlanFile() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required string planFile = 5;
*
*
* Plan file path
*
*/
public java.lang.String getPlanFile() {
java.lang.Object ref = planFile_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref)
.toStringUtf8();
planFile_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string planFile = 5;
*
*
* Plan file path
*
*/
public org.apache.hadoop.shaded.com.google.protobuf.ByteString
getPlanFileBytes() {
java.lang.Object ref = planFile_;
if (ref instanceof String) {
org.apache.hadoop.shaded.com.google.protobuf.ByteString b =
org.apache.hadoop.shaded.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
planFile_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
}
}
/**
* required string planFile = 5;
*
*
* Plan file path
*
*/
public Builder setPlanFile(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000010;
planFile_ = value;
onChanged();
return this;
}
/**
* required string planFile = 5;
*
*
* Plan file path
*
*/
public Builder clearPlanFile() {
bitField0_ = (bitField0_ & ~0x00000010);
planFile_ = getDefaultInstance().getPlanFile();
onChanged();
return this;
}
/**
* required string planFile = 5;
*
*
* Plan file path
*
*/
public Builder setPlanFileBytes(
org.apache.hadoop.shaded.com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000010;
planFile_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.SubmitDiskBalancerPlanRequestProto)
}
static {
defaultInstance = new SubmitDiskBalancerPlanRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.SubmitDiskBalancerPlanRequestProto)
}
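// Illustrative sketch only (not generated code): one plausible way a client could
// assemble a SubmitDiskBalancerPlanRequestProto with the Builder API above. The
// helper name and all literal values (hash, JSON, version, path) are made-up
// placeholders, not values taken from any real plan.
private static SubmitDiskBalancerPlanRequestProto exampleSubmitPlanRequest() {
  return SubmitDiskBalancerPlanRequestProto.newBuilder()
      .setPlanID("3f786850e387550fdab836ed7e6dc881de23001b") // required: SHA-1-like hash of the plan
      .setPlan("{\"volumeSetPlans\":[]}")                    // required: plan data in JSON form
      .setPlanVersion(1L)                                    // optional: plan version number
      .setIgnoreDateCheck(false)                             // optional: keep date checks enabled
      .setPlanFile("/tmp/disk-balancer-plan.json")           // required: path of the plan file
      .build();                                              // throws if a required field is unset
}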
public interface SubmitDiskBalancerPlanResponseProtoOrBuilder
extends org.apache.hadoop.shaded.com.google.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.hdfs.SubmitDiskBalancerPlanResponseProto}
*
*
**
* Response from the DataNode on Plan Submit request
*
*/
public static final class SubmitDiskBalancerPlanResponseProto extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage
implements SubmitDiskBalancerPlanResponseProtoOrBuilder {
// Use SubmitDiskBalancerPlanResponseProto.newBuilder() to construct.
private SubmitDiskBalancerPlanResponseProto(org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private SubmitDiskBalancerPlanResponseProto(boolean noInit) { this.unknownFields = org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final SubmitDiskBalancerPlanResponseProto defaultInstance;
public static SubmitDiskBalancerPlanResponseProto getDefaultInstance() {
return defaultInstance;
}
public SubmitDiskBalancerPlanResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private SubmitDiskBalancerPlanResponseProto(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
initFields();
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_SubmitDiskBalancerPlanResponseProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_SubmitDiskBalancerPlanResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto.Builder.class);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Parser<SubmitDiskBalancerPlanResponseProto> PARSER =
new org.apache.hadoop.shaded.com.google.protobuf.AbstractParser<SubmitDiskBalancerPlanResponseProto>() {
public SubmitDiskBalancerPlanResponseProto parsePartialFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return new SubmitDiskBalancerPlanResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public org.apache.hadoop.shaded.com.google.protobuf.Parser getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.SubmitDiskBalancerPlanResponseProto}
*
*
**
* Response from the DataNode on Plan Submit request
*
*/
public static final class Builder extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProtoOrBuilder {
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_SubmitDiskBalancerPlanResponseProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_SubmitDiskBalancerPlanResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_SubmitDiskBalancerPlanResponseProto_descriptor;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto.getDefaultInstance();
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(org.apache.hadoop.shaded.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.SubmitDiskBalancerPlanResponseProto)
}
static {
defaultInstance = new SubmitDiskBalancerPlanResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.SubmitDiskBalancerPlanResponseProto)
}
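// Illustrative sketch only (not generated code): SubmitDiskBalancerPlanResponseProto
// declares no fields, so handling it amounts to a parse/validity check. The helper
// name and parameter are hypothetical.
private static SubmitDiskBalancerPlanResponseProto exampleParseSubmitPlanResponse(byte[] wireBytes)
    throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
  // parseFrom throws InvalidProtocolBufferException if the bytes are not a valid message.
  return SubmitDiskBalancerPlanResponseProto.parseFrom(wireBytes);
}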
public interface CancelPlanRequestProtoOrBuilder
extends org.apache.hadoop.shaded.com.google.protobuf.MessageOrBuilder {
// required string planID = 1;
/**
* required string planID = 1;
*/
boolean hasPlanID();
/**
* required string planID = 1;
*/
java.lang.String getPlanID();
/**
* required string planID = 1;
*/
org.apache.hadoop.shaded.com.google.protobuf.ByteString
getPlanIDBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.CancelPlanRequestProto}
*
*
**
* This message describes a request to cancel an
* outstanding disk balancer plan
*
*/
public static final class CancelPlanRequestProto extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage
implements CancelPlanRequestProtoOrBuilder {
// Use CancelPlanRequestProto.newBuilder() to construct.
private CancelPlanRequestProto(org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private CancelPlanRequestProto(boolean noInit) { this.unknownFields = org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final CancelPlanRequestProto defaultInstance;
public static CancelPlanRequestProto getDefaultInstance() {
return defaultInstance;
}
public CancelPlanRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private CancelPlanRequestProto(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
planID_ = input.readBytes();
break;
}
}
}
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_CancelPlanRequestProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_CancelPlanRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto.Builder.class);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Parser<CancelPlanRequestProto> PARSER =
new org.apache.hadoop.shaded.com.google.protobuf.AbstractParser<CancelPlanRequestProto>() {
public CancelPlanRequestProto parsePartialFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return new CancelPlanRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public org.apache.hadoop.shaded.com.google.protobuf.Parser getParserForType() {
return PARSER;
}
private int bitField0_;
// required string planID = 1;
public static final int PLANID_FIELD_NUMBER = 1;
private java.lang.Object planID_;
/**
* required string planID = 1;
*/
public boolean hasPlanID() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string planID = 1;
*/
public java.lang.String getPlanID() {
java.lang.Object ref = planID_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.shaded.com.google.protobuf.ByteString bs =
(org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
planID_ = s;
}
return s;
}
}
/**
* required string planID = 1;
*/
public org.apache.hadoop.shaded.com.google.protobuf.ByteString
getPlanIDBytes() {
java.lang.Object ref = planID_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.shaded.com.google.protobuf.ByteString b =
org.apache.hadoop.shaded.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
planID_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
planID_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasPlanID()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getPlanIDBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getPlanIDBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto) obj;
boolean result = true;
result = result && (hasPlanID() == other.hasPlanID());
if (hasPlanID()) {
result = result && getPlanID()
.equals(other.getPlanID());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasPlanID()) {
hash = (37 * hash) + PLANID_FIELD_NUMBER;
hash = (53 * hash) + getPlanID().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.CancelPlanRequestProto}
*
*
**
* This message describes a request to cancel an
* outstanding disk balancer plan
*
*/
public static final class Builder extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProtoOrBuilder {
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_CancelPlanRequestProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_CancelPlanRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
planID_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_CancelPlanRequestProto_descriptor;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto.getDefaultInstance();
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.planID_ = planID_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(org.apache.hadoop.shaded.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto.getDefaultInstance()) return this;
if (other.hasPlanID()) {
bitField0_ |= 0x00000001;
planID_ = other.planID_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasPlanID()) {
return false;
}
return true;
}
public Builder mergeFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string planID = 1;
private java.lang.Object planID_ = "";
/**
* required string planID = 1;
*/
public boolean hasPlanID() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string planID = 1;
*/
public java.lang.String getPlanID() {
java.lang.Object ref = planID_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref)
.toStringUtf8();
planID_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string planID = 1;
*/
public org.apache.hadoop.shaded.com.google.protobuf.ByteString
getPlanIDBytes() {
java.lang.Object ref = planID_;
if (ref instanceof String) {
org.apache.hadoop.shaded.com.google.protobuf.ByteString b =
org.apache.hadoop.shaded.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
planID_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
}
}
/**
* required string planID = 1;
*/
public Builder setPlanID(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
planID_ = value;
onChanged();
return this;
}
/**
* required string planID = 1;
*/
public Builder clearPlanID() {
bitField0_ = (bitField0_ & ~0x00000001);
planID_ = getDefaultInstance().getPlanID();
onChanged();
return this;
}
/**
* required string planID = 1;
*/
public Builder setPlanIDBytes(
org.apache.hadoop.shaded.com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
planID_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.CancelPlanRequestProto)
}
static {
defaultInstance = new CancelPlanRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.CancelPlanRequestProto)
}
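// Illustrative sketch only (not generated code): a hypothetical helper showing how a
// cancellation request might be built and serialized with the generated API above. The
// plan ID literal is a made-up placeholder; in practice it would match the ID of the
// plan that was previously submitted.
private static byte[] exampleCancelPlanRequestBytes() {
  CancelPlanRequestProto request = CancelPlanRequestProto.newBuilder()
      .setPlanID("3f786850e387550fdab836ed7e6dc881de23001b") // required: ID of the plan to cancel
      .build();
  return request.toByteArray(); // wire-format bytes suitable for the RPC layer
}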
public interface CancelPlanResponseProtoOrBuilder
extends org.apache.hadoop.shaded.com.google.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.hdfs.CancelPlanResponseProto}
*
*
**
* This is the response for the cancellation request
*
*/
public static final class CancelPlanResponseProto extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage
implements CancelPlanResponseProtoOrBuilder {
// Use CancelPlanResponseProto.newBuilder() to construct.
private CancelPlanResponseProto(org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private CancelPlanResponseProto(boolean noInit) { this.unknownFields = org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final CancelPlanResponseProto defaultInstance;
public static CancelPlanResponseProto getDefaultInstance() {
return defaultInstance;
}
public CancelPlanResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private CancelPlanResponseProto(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
initFields();
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_CancelPlanResponseProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_CancelPlanResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto.Builder.class);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Parser<CancelPlanResponseProto> PARSER =
new org.apache.hadoop.shaded.com.google.protobuf.AbstractParser<CancelPlanResponseProto>() {
public CancelPlanResponseProto parsePartialFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return new CancelPlanResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public org.apache.hadoop.shaded.com.google.protobuf.Parser<CancelPlanResponseProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.CancelPlanResponseProto}
*
* This is the response for the cancellation request.
*/
public static final class Builder extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProtoOrBuilder {
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_CancelPlanResponseProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_CancelPlanResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_CancelPlanResponseProto_descriptor;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto.getDefaultInstance();
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(org.apache.hadoop.shaded.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.CancelPlanResponseProto)
}
static {
defaultInstance = new CancelPlanResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.CancelPlanResponseProto)
}
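// --- Illustrative usage sketch (not part of the generated protocol code) ---
// A minimal example of how a caller might assemble the cancel-plan request and
// round-trip the empty response through its wire form, assuming only the
// builder/parser API generated above. The plan id is a hypothetical placeholder.
private static void cancelPlanProtoExample()
    throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
  CancelPlanRequestProto request = CancelPlanRequestProto.newBuilder()
      .setPlanID("hypothetical-plan-id")            // identifier of the plan to cancel
      .build();
  byte[] requestBytes = request.toByteArray();      // serialize for the RPC layer
  CancelPlanRequestProto.parseFrom(requestBytes);   // and parse it back

  // The response carries no fields; its default instance round-trips the same way.
  CancelPlanResponseProto response = CancelPlanResponseProto.getDefaultInstance();
  CancelPlanResponseProto.parseFrom(response.toByteArray());
}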
public interface QueryPlanStatusRequestProtoOrBuilder
extends org.apache.hadoop.shaded.com.google.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.hdfs.QueryPlanStatusRequestProto}
*
* This message allows a client to query a datanode to see
* whether a disk balancer plan is executing and, if so, what its
* status is.
*/
public static final class QueryPlanStatusRequestProto extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage
implements QueryPlanStatusRequestProtoOrBuilder {
// Use QueryPlanStatusRequestProto.newBuilder() to construct.
private QueryPlanStatusRequestProto(org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private QueryPlanStatusRequestProto(boolean noInit) { this.unknownFields = org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final QueryPlanStatusRequestProto defaultInstance;
public static QueryPlanStatusRequestProto getDefaultInstance() {
return defaultInstance;
}
public QueryPlanStatusRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private QueryPlanStatusRequestProto(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
initFields();
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_QueryPlanStatusRequestProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_QueryPlanStatusRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto.Builder.class);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Parser<QueryPlanStatusRequestProto> PARSER =
new org.apache.hadoop.shaded.com.google.protobuf.AbstractParser<QueryPlanStatusRequestProto>() {
public QueryPlanStatusRequestProto parsePartialFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return new QueryPlanStatusRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public org.apache.hadoop.shaded.com.google.protobuf.Parser<QueryPlanStatusRequestProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.QueryPlanStatusRequestProto}
*
* This message allows a client to query a datanode to see
* whether a disk balancer plan is executing and, if so, what its
* status is.
*/
public static final class Builder extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProtoOrBuilder {
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_QueryPlanStatusRequestProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_QueryPlanStatusRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_QueryPlanStatusRequestProto_descriptor;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto.getDefaultInstance();
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(org.apache.hadoop.shaded.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.QueryPlanStatusRequestProto)
}
static {
defaultInstance = new QueryPlanStatusRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.QueryPlanStatusRequestProto)
}
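// --- Illustrative usage sketch (not part of the generated protocol code) ---
// QueryPlanStatusRequestProto carries no fields: the query is expressed entirely
// by the RPC call that carries it. A minimal sketch, assuming the standard
// builder/parser API generated above, showing the empty message surviving a round trip.
private static void queryPlanStatusRequestExample()
    throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
  QueryPlanStatusRequestProto request = QueryPlanStatusRequestProto.newBuilder().build();
  byte[] wire = request.toByteArray();             // an empty message serializes to zero bytes
  QueryPlanStatusRequestProto parsed = QueryPlanStatusRequestProto.parseFrom(wire);
  assert parsed.getSerializedSize() == 0;          // no fields and no unknown fields
}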
public interface QueryPlanStatusResponseProtoOrBuilder
extends org.apache.hadoop.shaded.com.google.protobuf.MessageOrBuilder {
// optional uint32 result = 1;
/**
* optional uint32 result = 1;
*/
boolean hasResult();
/**
* optional uint32 result = 1;
*/
int getResult();
// optional string planID = 2;
/**
* optional string planID = 2;
*/
boolean hasPlanID();
/**
* optional string planID = 2;
*/
java.lang.String getPlanID();
/**
* optional string planID = 2;
*/
org.apache.hadoop.shaded.com.google.protobuf.ByteString
getPlanIDBytes();
// optional string currentStatus = 3;
/**
* optional string currentStatus = 3;
*/
boolean hasCurrentStatus();
/**
* optional string currentStatus = 3;
*/
java.lang.String getCurrentStatus();
/**
* optional string currentStatus = 3;
*/
org.apache.hadoop.shaded.com.google.protobuf.ByteString
getCurrentStatusBytes();
// optional string planFile = 4;
/**
* optional string planFile = 4;
*/
boolean hasPlanFile();
/**
* optional string planFile = 4;
*/
java.lang.String getPlanFile();
/**
* optional string planFile = 4;
*/
org.apache.hadoop.shaded.com.google.protobuf.ByteString
getPlanFileBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.QueryPlanStatusResponseProto}
*
* This message describes a plan if it is in progress.
*/
public static final class QueryPlanStatusResponseProto extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage
implements QueryPlanStatusResponseProtoOrBuilder {
// Use QueryPlanStatusResponseProto.newBuilder() to construct.
private QueryPlanStatusResponseProto(org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private QueryPlanStatusResponseProto(boolean noInit) { this.unknownFields = org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final QueryPlanStatusResponseProto defaultInstance;
public static QueryPlanStatusResponseProto getDefaultInstance() {
return defaultInstance;
}
public QueryPlanStatusResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private QueryPlanStatusResponseProto(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
result_ = input.readUInt32();
break;
}
case 18: {
bitField0_ |= 0x00000002;
planID_ = input.readBytes();
break;
}
case 26: {
bitField0_ |= 0x00000004;
currentStatus_ = input.readBytes();
break;
}
case 34: {
bitField0_ |= 0x00000008;
planFile_ = input.readBytes();
break;
}
}
}
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_QueryPlanStatusResponseProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_QueryPlanStatusResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto.Builder.class);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Parser<QueryPlanStatusResponseProto> PARSER =
new org.apache.hadoop.shaded.com.google.protobuf.AbstractParser<QueryPlanStatusResponseProto>() {
public QueryPlanStatusResponseProto parsePartialFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return new QueryPlanStatusResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public org.apache.hadoop.shaded.com.google.protobuf.Parser<QueryPlanStatusResponseProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// optional uint32 result = 1;
public static final int RESULT_FIELD_NUMBER = 1;
private int result_;
/**
* optional uint32 result = 1;
*/
public boolean hasResult() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* optional uint32 result = 1;
*/
public int getResult() {
return result_;
}
// optional string planID = 2;
public static final int PLANID_FIELD_NUMBER = 2;
private java.lang.Object planID_;
/**
* optional string planID = 2;
*/
public boolean hasPlanID() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional string planID = 2;
*/
public java.lang.String getPlanID() {
java.lang.Object ref = planID_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.shaded.com.google.protobuf.ByteString bs =
(org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
planID_ = s;
}
return s;
}
}
/**
* optional string planID = 2;
*/
public org.apache.hadoop.shaded.com.google.protobuf.ByteString
getPlanIDBytes() {
java.lang.Object ref = planID_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.shaded.com.google.protobuf.ByteString b =
org.apache.hadoop.shaded.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
planID_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
}
}
// optional string currentStatus = 3;
public static final int CURRENTSTATUS_FIELD_NUMBER = 3;
private java.lang.Object currentStatus_;
/**
* optional string currentStatus = 3;
*/
public boolean hasCurrentStatus() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional string currentStatus = 3;
*/
public java.lang.String getCurrentStatus() {
java.lang.Object ref = currentStatus_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.shaded.com.google.protobuf.ByteString bs =
(org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
currentStatus_ = s;
}
return s;
}
}
/**
* optional string currentStatus = 3;
*/
public org.apache.hadoop.shaded.com.google.protobuf.ByteString
getCurrentStatusBytes() {
java.lang.Object ref = currentStatus_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.shaded.com.google.protobuf.ByteString b =
org.apache.hadoop.shaded.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
currentStatus_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
}
}
// optional string planFile = 4;
public static final int PLANFILE_FIELD_NUMBER = 4;
private java.lang.Object planFile_;
/**
* optional string planFile = 4;
*/
public boolean hasPlanFile() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional string planFile = 4;
*/
public java.lang.String getPlanFile() {
java.lang.Object ref = planFile_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.shaded.com.google.protobuf.ByteString bs =
(org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
planFile_ = s;
}
return s;
}
}
/**
* optional string planFile = 4;
*/
public org.apache.hadoop.shaded.com.google.protobuf.ByteString
getPlanFileBytes() {
java.lang.Object ref = planFile_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.shaded.com.google.protobuf.ByteString b =
org.apache.hadoop.shaded.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
planFile_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
result_ = 0;
planID_ = "";
currentStatus_ = "";
planFile_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt32(1, result_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getPlanIDBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, getCurrentStatusBytes());
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeBytes(4, getPlanFileBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream
.computeUInt32Size(1, result_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getPlanIDBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream
.computeBytesSize(3, getCurrentStatusBytes());
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream
.computeBytesSize(4, getPlanFileBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto) obj;
boolean result = true;
result = result && (hasResult() == other.hasResult());
if (hasResult()) {
result = result && (getResult()
== other.getResult());
}
result = result && (hasPlanID() == other.hasPlanID());
if (hasPlanID()) {
result = result && getPlanID()
.equals(other.getPlanID());
}
result = result && (hasCurrentStatus() == other.hasCurrentStatus());
if (hasCurrentStatus()) {
result = result && getCurrentStatus()
.equals(other.getCurrentStatus());
}
result = result && (hasPlanFile() == other.hasPlanFile());
if (hasPlanFile()) {
result = result && getPlanFile()
.equals(other.getPlanFile());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasResult()) {
hash = (37 * hash) + RESULT_FIELD_NUMBER;
hash = (53 * hash) + getResult();
}
if (hasPlanID()) {
hash = (37 * hash) + PLANID_FIELD_NUMBER;
hash = (53 * hash) + getPlanID().hashCode();
}
if (hasCurrentStatus()) {
hash = (37 * hash) + CURRENTSTATUS_FIELD_NUMBER;
hash = (53 * hash) + getCurrentStatus().hashCode();
}
if (hasPlanFile()) {
hash = (37 * hash) + PLANFILE_FIELD_NUMBER;
hash = (53 * hash) + getPlanFile().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.QueryPlanStatusResponseProto}
*
* This message describes a plan if it is in progress.
*/
public static final class Builder extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProtoOrBuilder {
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_QueryPlanStatusResponseProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_QueryPlanStatusResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
result_ = 0;
bitField0_ = (bitField0_ & ~0x00000001);
planID_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
currentStatus_ = "";
bitField0_ = (bitField0_ & ~0x00000004);
planFile_ = "";
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_QueryPlanStatusResponseProto_descriptor;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto.getDefaultInstance();
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.result_ = result_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.planID_ = planID_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.currentStatus_ = currentStatus_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.planFile_ = planFile_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(org.apache.hadoop.shaded.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto.getDefaultInstance()) return this;
if (other.hasResult()) {
setResult(other.getResult());
}
if (other.hasPlanID()) {
bitField0_ |= 0x00000002;
planID_ = other.planID_;
onChanged();
}
if (other.hasCurrentStatus()) {
bitField0_ |= 0x00000004;
currentStatus_ = other.currentStatus_;
onChanged();
}
if (other.hasPlanFile()) {
bitField0_ |= 0x00000008;
planFile_ = other.planFile_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// optional uint32 result = 1;
private int result_ ;
/**
* optional uint32 result = 1;
*/
public boolean hasResult() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* optional uint32 result = 1;
*/
public int getResult() {
return result_;
}
/**
* optional uint32 result = 1;
*/
public Builder setResult(int value) {
bitField0_ |= 0x00000001;
result_ = value;
onChanged();
return this;
}
/**
* optional uint32 result = 1;
*/
public Builder clearResult() {
bitField0_ = (bitField0_ & ~0x00000001);
result_ = 0;
onChanged();
return this;
}
// optional string planID = 2;
private java.lang.Object planID_ = "";
/**
* optional string planID = 2;
*/
public boolean hasPlanID() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional string planID = 2;
*/
public java.lang.String getPlanID() {
java.lang.Object ref = planID_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref)
.toStringUtf8();
planID_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string planID = 2;
*/
public org.apache.hadoop.shaded.com.google.protobuf.ByteString
getPlanIDBytes() {
java.lang.Object ref = planID_;
if (ref instanceof String) {
org.apache.hadoop.shaded.com.google.protobuf.ByteString b =
org.apache.hadoop.shaded.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
planID_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
}
}
/**
* optional string planID = 2;
*/
public Builder setPlanID(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
planID_ = value;
onChanged();
return this;
}
/**
* optional string planID = 2;
*/
public Builder clearPlanID() {
bitField0_ = (bitField0_ & ~0x00000002);
planID_ = getDefaultInstance().getPlanID();
onChanged();
return this;
}
/**
* optional string planID = 2;
*/
public Builder setPlanIDBytes(
org.apache.hadoop.shaded.com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
planID_ = value;
onChanged();
return this;
}
// optional string currentStatus = 3;
private java.lang.Object currentStatus_ = "";
/**
* optional string currentStatus = 3;
*/
public boolean hasCurrentStatus() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional string currentStatus = 3;
*/
public java.lang.String getCurrentStatus() {
java.lang.Object ref = currentStatus_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref)
.toStringUtf8();
currentStatus_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string currentStatus = 3;
*/
public org.apache.hadoop.shaded.com.google.protobuf.ByteString
getCurrentStatusBytes() {
java.lang.Object ref = currentStatus_;
if (ref instanceof String) {
org.apache.hadoop.shaded.com.google.protobuf.ByteString b =
org.apache.hadoop.shaded.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
currentStatus_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
}
}
/**
* optional string currentStatus = 3;
*/
public Builder setCurrentStatus(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
currentStatus_ = value;
onChanged();
return this;
}
/**
* optional string currentStatus = 3;
*/
public Builder clearCurrentStatus() {
bitField0_ = (bitField0_ & ~0x00000004);
currentStatus_ = getDefaultInstance().getCurrentStatus();
onChanged();
return this;
}
/**
* optional string currentStatus = 3;
*/
public Builder setCurrentStatusBytes(
org.apache.hadoop.shaded.com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
currentStatus_ = value;
onChanged();
return this;
}
// optional string planFile = 4;
private java.lang.Object planFile_ = "";
/**
* optional string planFile = 4;
*/
public boolean hasPlanFile() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional string planFile = 4;
*/
public java.lang.String getPlanFile() {
java.lang.Object ref = planFile_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref)
.toStringUtf8();
planFile_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string planFile = 4;
*/
public org.apache.hadoop.shaded.com.google.protobuf.ByteString
getPlanFileBytes() {
java.lang.Object ref = planFile_;
if (ref instanceof String) {
org.apache.hadoop.shaded.com.google.protobuf.ByteString b =
org.apache.hadoop.shaded.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
planFile_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
}
}
/**
* optional string planFile = 4;
*/
public Builder setPlanFile(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
planFile_ = value;
onChanged();
return this;
}
/**
* optional string planFile = 4;
*/
public Builder clearPlanFile() {
bitField0_ = (bitField0_ & ~0x00000008);
planFile_ = getDefaultInstance().getPlanFile();
onChanged();
return this;
}
/**
* optional string planFile = 4;
*/
public Builder setPlanFileBytes(
org.apache.hadoop.shaded.com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
planFile_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.QueryPlanStatusResponseProto)
}
static {
defaultInstance = new QueryPlanStatusResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.QueryPlanStatusResponseProto)
}
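// --- Illustrative usage sketch (not part of the generated protocol code) ---
// A minimal example of populating the status response with its four optional fields
// and reading them back after a wire round trip. The values are hypothetical
// placeholders; only the builder/parser API generated above is assumed.
private static void queryPlanStatusResponseExample()
    throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
  QueryPlanStatusResponseProto status = QueryPlanStatusResponseProto.newBuilder()
      .setResult(0)                                   // optional uint32 result = 1
      .setPlanID("hypothetical-plan-id")              // optional string planID = 2
      .setCurrentStatus("plan under progress")        // optional string currentStatus = 3
      .setPlanFile("/tmp/disk-balancer-plan.json")    // optional string planFile = 4
      .build();
  QueryPlanStatusResponseProto parsed =
      QueryPlanStatusResponseProto.parseFrom(status.toByteArray());
  if (parsed.hasCurrentStatus()) {
    System.out.println(parsed.getPlanID() + ": " + parsed.getCurrentStatus());
  }
}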
public interface DiskBalancerSettingRequestProtoOrBuilder
extends org.apache.hadoop.shaded.com.google.protobuf.MessageOrBuilder {
// required string key = 1;
/**
* required string key = 1;
*/
boolean hasKey();
/**
* required string key = 1;
*/
java.lang.String getKey();
/**
* required string key = 1;
*/
org.apache.hadoop.shaded.com.google.protobuf.ByteString
getKeyBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.DiskBalancerSettingRequestProto}
*
* This message asks a datanode for a specific setting
* used by the disk balancer.
*/
public static final class DiskBalancerSettingRequestProto extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage
implements DiskBalancerSettingRequestProtoOrBuilder {
// Use DiskBalancerSettingRequestProto.newBuilder() to construct.
private DiskBalancerSettingRequestProto(org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private DiskBalancerSettingRequestProto(boolean noInit) { this.unknownFields = org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final DiskBalancerSettingRequestProto defaultInstance;
public static DiskBalancerSettingRequestProto getDefaultInstance() {
return defaultInstance;
}
public DiskBalancerSettingRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DiskBalancerSettingRequestProto(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
key_ = input.readBytes();
break;
}
}
}
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DiskBalancerSettingRequestProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DiskBalancerSettingRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto.Builder.class);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Parser PARSER =
new org.apache.hadoop.shaded.com.google.protobuf.AbstractParser() {
public DiskBalancerSettingRequestProto parsePartialFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return new DiskBalancerSettingRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public org.apache.hadoop.shaded.com.google.protobuf.Parser getParserForType() {
return PARSER;
}
private int bitField0_;
// required string key = 1;
public static final int KEY_FIELD_NUMBER = 1;
private java.lang.Object key_;
/**
* required string key = 1;
*/
public boolean hasKey() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string key = 1;
*/
public java.lang.String getKey() {
java.lang.Object ref = key_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.shaded.com.google.protobuf.ByteString bs =
(org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
key_ = s;
}
return s;
}
}
/**
* required string key = 1;
*/
public org.apache.hadoop.shaded.com.google.protobuf.ByteString
getKeyBytes() {
java.lang.Object ref = key_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.shaded.com.google.protobuf.ByteString b =
org.apache.hadoop.shaded.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
key_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
key_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasKey()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getKeyBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getKeyBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto) obj;
boolean result = true;
result = result && (hasKey() == other.hasKey());
if (hasKey()) {
result = result && getKey()
.equals(other.getKey());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasKey()) {
hash = (37 * hash) + KEY_FIELD_NUMBER;
hash = (53 * hash) + getKey().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.DiskBalancerSettingRequestProto}
*
*
**
* This message sends a request to the data node to get a specific setting
* that is used by the disk balancer.
*
*/
public static final class Builder extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProtoOrBuilder {
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DiskBalancerSettingRequestProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DiskBalancerSettingRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
key_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DiskBalancerSettingRequestProto_descriptor;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto.getDefaultInstance();
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.key_ = key_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(org.apache.hadoop.shaded.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto.getDefaultInstance()) return this;
if (other.hasKey()) {
bitField0_ |= 0x00000001;
key_ = other.key_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasKey()) {
return false;
}
return true;
}
public Builder mergeFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string key = 1;
private java.lang.Object key_ = "";
/**
* required string key = 1;
*/
public boolean hasKey() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string key = 1;
*/
public java.lang.String getKey() {
java.lang.Object ref = key_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref)
.toStringUtf8();
key_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string key = 1;
*/
public org.apache.hadoop.shaded.com.google.protobuf.ByteString
getKeyBytes() {
java.lang.Object ref = key_;
if (ref instanceof String) {
org.apache.hadoop.shaded.com.google.protobuf.ByteString b =
org.apache.hadoop.shaded.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
key_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
}
}
/**
* required string key = 1;
*/
public Builder setKey(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
key_ = value;
onChanged();
return this;
}
/**
* required string key = 1;
*/
public Builder clearKey() {
bitField0_ = (bitField0_ & ~0x00000001);
key_ = getDefaultInstance().getKey();
onChanged();
return this;
}
/**
* required string key = 1;
*/
public Builder setKeyBytes(
org.apache.hadoop.shaded.com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
key_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DiskBalancerSettingRequestProto)
}
static {
defaultInstance = new DiskBalancerSettingRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DiskBalancerSettingRequestProto)
}
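// Illustrative sketch only (not part of the generated API): builds a
// DiskBalancerSettingRequestProto with its single required field and round-trips it
// through the protobuf wire format. The setting key is a hypothetical placeholder.
private static DiskBalancerSettingRequestProto exampleDiskBalancerSettingRequest()
    throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
  DiskBalancerSettingRequestProto request = DiskBalancerSettingRequestProto.newBuilder()
      .setKey("example.disk.balancer.setting") // hypothetical setting key
      .build();                                // build() succeeds because 'key' is set
  byte[] wireBytes = request.toByteArray();    // serialize to the wire format
  return DiskBalancerSettingRequestProto.parseFrom(wireBytes); // parse it back
}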
public interface DiskBalancerSettingResponseProtoOrBuilder
extends org.apache.hadoop.shaded.com.google.protobuf.MessageOrBuilder {
// required string value = 1;
/**
* required string value = 1;
*/
boolean hasValue();
/**
* required string value = 1;
*/
java.lang.String getValue();
/**
* required string value = 1;
*/
org.apache.hadoop.shaded.com.google.protobuf.ByteString
getValueBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.DiskBalancerSettingResponseProto}
*
*
**
* Response that describes the value of the requested disk balancer setting.
*
*/
public static final class DiskBalancerSettingResponseProto extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage
implements DiskBalancerSettingResponseProtoOrBuilder {
// Use DiskBalancerSettingResponseProto.newBuilder() to construct.
private DiskBalancerSettingResponseProto(org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private DiskBalancerSettingResponseProto(boolean noInit) { this.unknownFields = org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final DiskBalancerSettingResponseProto defaultInstance;
public static DiskBalancerSettingResponseProto getDefaultInstance() {
return defaultInstance;
}
public DiskBalancerSettingResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DiskBalancerSettingResponseProto(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
value_ = input.readBytes();
break;
}
}
}
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DiskBalancerSettingResponseProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DiskBalancerSettingResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto.Builder.class);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Parser PARSER =
new org.apache.hadoop.shaded.com.google.protobuf.AbstractParser() {
public DiskBalancerSettingResponseProto parsePartialFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return new DiskBalancerSettingResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public org.apache.hadoop.shaded.com.google.protobuf.Parser getParserForType() {
return PARSER;
}
private int bitField0_;
// required string value = 1;
public static final int VALUE_FIELD_NUMBER = 1;
private java.lang.Object value_;
/**
* required string value = 1;
*/
public boolean hasValue() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string value = 1;
*/
public java.lang.String getValue() {
java.lang.Object ref = value_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.shaded.com.google.protobuf.ByteString bs =
(org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
value_ = s;
}
return s;
}
}
/**
* required string value = 1;
*/
public org.apache.hadoop.shaded.com.google.protobuf.ByteString
getValueBytes() {
java.lang.Object ref = value_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.shaded.com.google.protobuf.ByteString b =
org.apache.hadoop.shaded.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
value_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
value_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasValue()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getValueBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += org.apache.hadoop.shaded.com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getValueBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto) obj;
boolean result = true;
result = result && (hasValue() == other.hasValue());
if (hasValue()) {
result = result && getValue()
.equals(other.getValue());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasValue()) {
hash = (37 * hash) + VALUE_FIELD_NUMBER;
hash = (53 * hash) + getValue().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.ByteString data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto parseFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.DiskBalancerSettingResponseProto}
*
*
**
* Response that describes the value of the requested disk balancer setting.
*
*/
public static final class Builder extends
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProtoOrBuilder {
public static final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DiskBalancerSettingResponseProto_descriptor;
}
protected org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DiskBalancerSettingResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
value_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DiskBalancerSettingResponseProto_descriptor;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto.getDefaultInstance();
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.value_ = value_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(org.apache.hadoop.shaded.com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto.getDefaultInstance()) return this;
if (other.hasValue()) {
bitField0_ |= 0x00000001;
value_ = other.value_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasValue()) {
return false;
}
return true;
}
public Builder mergeFrom(
org.apache.hadoop.shaded.com.google.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string value = 1;
private java.lang.Object value_ = "";
/**
* required string value = 1;
*/
public boolean hasValue() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string value = 1;
*/
public java.lang.String getValue() {
java.lang.Object ref = value_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref)
.toStringUtf8();
value_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string value = 1;
*/
public org.apache.hadoop.shaded.com.google.protobuf.ByteString
getValueBytes() {
java.lang.Object ref = value_;
if (ref instanceof String) {
org.apache.hadoop.shaded.com.google.protobuf.ByteString b =
org.apache.hadoop.shaded.com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
value_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.com.google.protobuf.ByteString) ref;
}
}
/**
* required string value = 1;
*/
public Builder setValue(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
value_ = value;
onChanged();
return this;
}
/**
* required string value = 1;
*/
public Builder clearValue() {
bitField0_ = (bitField0_ & ~0x00000001);
value_ = getDefaultInstance().getValue();
onChanged();
return this;
}
/**
* required string value = 1;
*/
public Builder setValueBytes(
org.apache.hadoop.shaded.com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
value_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DiskBalancerSettingResponseProto)
}
static {
defaultInstance = new DiskBalancerSettingResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DiskBalancerSettingResponseProto)
}
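// Illustrative sketch only (not part of the generated API): writes a
// DiskBalancerSettingResponseProto in length-delimited form and reads it back,
// the framing typically used when several messages share one stream. The value
// string is a hypothetical placeholder.
private static DiskBalancerSettingResponseProto exampleDiskBalancerSettingRoundTrip()
    throws java.io.IOException {
  DiskBalancerSettingResponseProto response = DiskBalancerSettingResponseProto.newBuilder()
      .setValue("42") // hypothetical setting value
      .build();
  java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
  response.writeDelimitedTo(out); // varint length prefix, then the message bytes
  java.io.ByteArrayInputStream in = new java.io.ByteArrayInputStream(out.toByteArray());
  return DiskBalancerSettingResponseProto.parseDelimitedFrom(in);
}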
/**
* Protobuf service {@code hadoop.hdfs.ClientDatanodeProtocolService}
*
*
**
* Protocol used by clients to communicate with the Datanode.
* See the request and response messages for details of each rpc call.
*
*/
public static abstract class ClientDatanodeProtocolService
implements org.apache.hadoop.shaded.com.google.protobuf.Service {
protected ClientDatanodeProtocolService() {}
public interface Interface {
/**
* rpc getReplicaVisibleLength(.hadoop.hdfs.GetReplicaVisibleLengthRequestProto) returns (.hadoop.hdfs.GetReplicaVisibleLengthResponseProto);
*
*
**
* Returns the visible length of the replica
*
*/
public abstract void getReplicaVisibleLength(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done);
/**
* rpc refreshNamenodes(.hadoop.hdfs.RefreshNamenodesRequestProto) returns (.hadoop.hdfs.RefreshNamenodesResponseProto);
*
*
**
* Refresh the list of federated namenodes from updated configuration.
* Adds new namenodes and stops the deleted namenodes.
*
*/
public abstract void refreshNamenodes(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done);
/**
* rpc deleteBlockPool(.hadoop.hdfs.DeleteBlockPoolRequestProto) returns (.hadoop.hdfs.DeleteBlockPoolResponseProto);
*
*
**
* Delete the block pool from the datanode.
*
*/
public abstract void deleteBlockPool(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done);
/**
* rpc getBlockLocalPathInfo(.hadoop.hdfs.GetBlockLocalPathInfoRequestProto) returns (.hadoop.hdfs.GetBlockLocalPathInfoResponseProto);
*
*
**
* Retrieves the path names of the block file and metadata file stored on the
* local file system.
*
*/
public abstract void getBlockLocalPathInfo(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done);
/**
* rpc shutdownDatanode(.hadoop.hdfs.ShutdownDatanodeRequestProto) returns (.hadoop.hdfs.ShutdownDatanodeResponseProto);
*/
public abstract void shutdownDatanode(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done);
/**
* rpc evictWriters(.hadoop.hdfs.EvictWritersRequestProto) returns (.hadoop.hdfs.EvictWritersResponseProto);
*/
public abstract void evictWriters(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done);
/**
* rpc getDatanodeInfo(.hadoop.hdfs.GetDatanodeInfoRequestProto) returns (.hadoop.hdfs.GetDatanodeInfoResponseProto);
*/
public abstract void getDatanodeInfo(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done);
/**
* rpc getVolumeReport(.hadoop.hdfs.GetVolumeReportRequestProto) returns (.hadoop.hdfs.GetVolumeReportResponseProto);
*/
public abstract void getVolumeReport(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done);
/**
* rpc getReconfigurationStatus(.hadoop.hdfs.GetReconfigurationStatusRequestProto) returns (.hadoop.hdfs.GetReconfigurationStatusResponseProto);
*/
public abstract void getReconfigurationStatus(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done);
/**
* rpc startReconfiguration(.hadoop.hdfs.StartReconfigurationRequestProto) returns (.hadoop.hdfs.StartReconfigurationResponseProto);
*/
public abstract void startReconfiguration(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done);
/**
* rpc listReconfigurableProperties(.hadoop.hdfs.ListReconfigurablePropertiesRequestProto) returns (.hadoop.hdfs.ListReconfigurablePropertiesResponseProto);
*/
public abstract void listReconfigurableProperties(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done);
/**
* rpc triggerBlockReport(.hadoop.hdfs.TriggerBlockReportRequestProto) returns (.hadoop.hdfs.TriggerBlockReportResponseProto);
*/
public abstract void triggerBlockReport(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done);
/**
* rpc getBalancerBandwidth(.hadoop.hdfs.GetBalancerBandwidthRequestProto) returns (.hadoop.hdfs.GetBalancerBandwidthResponseProto);
*
*
**
* Returns the balancer bandwidth value of the datanode.
*
*/
public abstract void getBalancerBandwidth(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done);
/**
* rpc submitDiskBalancerPlan(.hadoop.hdfs.SubmitDiskBalancerPlanRequestProto) returns (.hadoop.hdfs.SubmitDiskBalancerPlanResponseProto);
*
*
**
* Submit a disk balancer plan for execution
*
*/
public abstract void submitDiskBalancerPlan(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done);
/**
* rpc cancelDiskBalancerPlan(.hadoop.hdfs.CancelPlanRequestProto) returns (.hadoop.hdfs.CancelPlanResponseProto);
*
*
**
* Cancel an executing plan
*
*/
public abstract void cancelDiskBalancerPlan(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done);
/**
* rpc queryDiskBalancerPlan(.hadoop.hdfs.QueryPlanStatusRequestProto) returns (.hadoop.hdfs.QueryPlanStatusResponseProto);
*
*
**
* Gets the status of an executing Plan
*
*/
public abstract void queryDiskBalancerPlan(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done);
/**
* rpc getDiskBalancerSetting(.hadoop.hdfs.DiskBalancerSettingRequestProto) returns (.hadoop.hdfs.DiskBalancerSettingResponseProto);
*
*
**
* Gets run-time settings of Disk Balancer.
*
*/
public abstract void getDiskBalancerSetting(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done);
}
public static org.apache.hadoop.shaded.com.google.protobuf.Service newReflectiveService(
final Interface impl) {
return new ClientDatanodeProtocolService() {
@java.lang.Override
public void getReplicaVisibleLength(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done) {
impl.getReplicaVisibleLength(controller, request, done);
}
@java.lang.Override
public void refreshNamenodes(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done) {
impl.refreshNamenodes(controller, request, done);
}
@java.lang.Override
public void deleteBlockPool(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done) {
impl.deleteBlockPool(controller, request, done);
}
@java.lang.Override
public void getBlockLocalPathInfo(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done) {
impl.getBlockLocalPathInfo(controller, request, done);
}
@java.lang.Override
public void shutdownDatanode(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done) {
impl.shutdownDatanode(controller, request, done);
}
@java.lang.Override
public void evictWriters(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done) {
impl.evictWriters(controller, request, done);
}
@java.lang.Override
public void getDatanodeInfo(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done) {
impl.getDatanodeInfo(controller, request, done);
}
@java.lang.Override
public void getVolumeReport(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done) {
impl.getVolumeReport(controller, request, done);
}
@java.lang.Override
public void getReconfigurationStatus(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done) {
impl.getReconfigurationStatus(controller, request, done);
}
@java.lang.Override
public void startReconfiguration(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done) {
impl.startReconfiguration(controller, request, done);
}
@java.lang.Override
public void listReconfigurableProperties(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done) {
impl.listReconfigurableProperties(controller, request, done);
}
@java.lang.Override
public void triggerBlockReport(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done) {
impl.triggerBlockReport(controller, request, done);
}
@java.lang.Override
public void getBalancerBandwidth(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done) {
impl.getBalancerBandwidth(controller, request, done);
}
@java.lang.Override
public void submitDiskBalancerPlan(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done) {
impl.submitDiskBalancerPlan(controller, request, done);
}
@java.lang.Override
public void cancelDiskBalancerPlan(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done) {
impl.cancelDiskBalancerPlan(controller, request, done);
}
@java.lang.Override
public void queryDiskBalancerPlan(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done) {
impl.queryDiskBalancerPlan(controller, request, done);
}
@java.lang.Override
public void getDiskBalancerSetting(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback done) {
impl.getDiskBalancerSetting(controller, request, done);
}
};
}
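// Illustrative sketch only (not part of the generated API): given a BlockingInterface
// stub for this service (obtained elsewhere, e.g. over Hadoop RPC -- not shown here),
// issue the getDiskBalancerSetting call declared above. Passing a null RpcController
// is an assumption of this sketch; callers may supply their own controller.
private static java.lang.String exampleGetDiskBalancerSetting(
    BlockingInterface stub, java.lang.String settingKey)
    throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException {
  org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto request =
      org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto.newBuilder()
          .setKey(settingKey)
          .build();
  return stub.getDiskBalancerSetting(null, request).getValue();
}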
public static org.apache.hadoop.shaded.com.google.protobuf.BlockingService
newReflectiveBlockingService(final BlockingInterface impl) {
return new org.apache.hadoop.shaded.com.google.protobuf.BlockingService() {
public final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptorForType() {
return getDescriptor();
}
public final org.apache.hadoop.shaded.com.google.protobuf.Message callBlockingMethod(
org.apache.hadoop.shaded.com.google.protobuf.Descriptors.MethodDescriptor method,
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.com.google.protobuf.Message request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.callBlockingMethod() given method descriptor for " +
"wrong service type.");
}
switch(method.getIndex()) {
case 0:
return impl.getReplicaVisibleLength(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto)request);
case 1:
return impl.refreshNamenodes(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto)request);
case 2:
return impl.deleteBlockPool(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto)request);
case 3:
return impl.getBlockLocalPathInfo(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto)request);
case 4:
return impl.shutdownDatanode(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto)request);
case 5:
return impl.evictWriters(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto)request);
case 6:
return impl.getDatanodeInfo(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto)request);
case 7:
return impl.getVolumeReport(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto)request);
case 8:
return impl.getReconfigurationStatus(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusRequestProto)request);
case 9:
return impl.startReconfiguration(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto)request);
case 10:
return impl.listReconfigurableProperties(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesRequestProto)request);
case 11:
return impl.triggerBlockReport(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto)request);
case 12:
return impl.getBalancerBandwidth(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto)request);
case 13:
return impl.submitDiskBalancerPlan(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto)request);
case 14:
return impl.cancelDiskBalancerPlan(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto)request);
case 15:
return impl.queryDiskBalancerPlan(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto)request);
case 16:
return impl.getDiskBalancerSetting(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto)request);
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final org.apache.hadoop.shaded.com.google.protobuf.Message
getRequestPrototype(
org.apache.hadoop.shaded.com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getRequestPrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.getDefaultInstance();
case 1:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.getDefaultInstance();
case 2:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.getDefaultInstance();
case 3:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.getDefaultInstance();
case 4:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto.getDefaultInstance();
case 5:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto.getDefaultInstance();
case 6:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto.getDefaultInstance();
case 7:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto.getDefaultInstance();
case 8:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusRequestProto.getDefaultInstance();
case 9:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto.getDefaultInstance();
case 10:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesRequestProto.getDefaultInstance();
case 11:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto.getDefaultInstance();
case 12:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto.getDefaultInstance();
case 13:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto.getDefaultInstance();
case 14:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto.getDefaultInstance();
case 15:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto.getDefaultInstance();
case 16:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final org.apache.hadoop.shaded.com.google.protobuf.Message
getResponsePrototype(
org.apache.hadoop.shaded.com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getResponsePrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance();
case 1:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance();
case 2:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance();
case 3:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance();
case 4:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.getDefaultInstance();
case 5:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto.getDefaultInstance();
case 6:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.getDefaultInstance();
case 7:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto.getDefaultInstance();
case 8:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto.getDefaultInstance();
case 9:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationResponseProto.getDefaultInstance();
case 10:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto.getDefaultInstance();
case 11:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.getDefaultInstance();
case 12:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto.getDefaultInstance();
case 13:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto.getDefaultInstance();
case 14:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto.getDefaultInstance();
case 15:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto.getDefaultInstance();
case 16:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
};
}
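/*
 * Usage sketch (not part of the generated API): newReflectiveBlockingService(impl) above wraps a
 * BlockingInterface implementation in a protobuf BlockingService so an RPC server can dispatch
 * incoming calls to it by method index. A minimal, hedged example; 'myImpl' is a placeholder for
 * whatever BlockingInterface implementation you provide (in Hadoop, a server-side translator such
 * as ClientDatanodeProtocolServerSideTranslatorPB typically plays this role):
 *
 *   org.apache.hadoop.shaded.com.google.protobuf.BlockingService service =
 *       ClientDatanodeProtocolService.newReflectiveBlockingService(myImpl);
 *   // hand 'service' to your RPC server; the exact registration call depends on the server framework in use
 */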
/**
 * rpc getReplicaVisibleLength(.hadoop.hdfs.GetReplicaVisibleLengthRequestProto) returns (.hadoop.hdfs.GetReplicaVisibleLengthResponseProto);
 *
 * Returns the visible length of the replica
 */
public abstract void getReplicaVisibleLength(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto> done);
/**
 * rpc refreshNamenodes(.hadoop.hdfs.RefreshNamenodesRequestProto) returns (.hadoop.hdfs.RefreshNamenodesResponseProto);
 *
 * Refresh the list of federated namenodes from updated configuration.
 * Adds new namenodes and stops the deleted namenodes.
 */
public abstract void refreshNamenodes(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto> done);
/**
 * rpc deleteBlockPool(.hadoop.hdfs.DeleteBlockPoolRequestProto) returns (.hadoop.hdfs.DeleteBlockPoolResponseProto);
 *
 * Delete the block pool from the datanode.
 */
public abstract void deleteBlockPool(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto> done);
/**
 * rpc getBlockLocalPathInfo(.hadoop.hdfs.GetBlockLocalPathInfoRequestProto) returns (.hadoop.hdfs.GetBlockLocalPathInfoResponseProto);
 *
 * Retrieves the path names of the block file and metadata file stored on the
 * local file system.
 */
public abstract void getBlockLocalPathInfo(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto> done);
/**
* rpc shutdownDatanode(.hadoop.hdfs.ShutdownDatanodeRequestProto) returns (.hadoop.hdfs.ShutdownDatanodeResponseProto);
*/
public abstract void shutdownDatanode(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto> done);
/**
* rpc evictWriters(.hadoop.hdfs.EvictWritersRequestProto) returns (.hadoop.hdfs.EvictWritersResponseProto);
*/
public abstract void evictWriters(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto> done);
/**
* rpc getDatanodeInfo(.hadoop.hdfs.GetDatanodeInfoRequestProto) returns (.hadoop.hdfs.GetDatanodeInfoResponseProto);
*/
public abstract void getDatanodeInfo(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto> done);
/**
* rpc getVolumeReport(.hadoop.hdfs.GetVolumeReportRequestProto) returns (.hadoop.hdfs.GetVolumeReportResponseProto);
*/
public abstract void getVolumeReport(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto> done);
/**
* rpc getReconfigurationStatus(.hadoop.hdfs.GetReconfigurationStatusRequestProto) returns (.hadoop.hdfs.GetReconfigurationStatusResponseProto);
*/
public abstract void getReconfigurationStatus(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto> done);
/**
* rpc startReconfiguration(.hadoop.hdfs.StartReconfigurationRequestProto) returns (.hadoop.hdfs.StartReconfigurationResponseProto);
*/
public abstract void startReconfiguration(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationResponseProto> done);
/**
* rpc listReconfigurableProperties(.hadoop.hdfs.ListReconfigurablePropertiesRequestProto) returns (.hadoop.hdfs.ListReconfigurablePropertiesResponseProto);
*/
public abstract void listReconfigurableProperties(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto> done);
/**
* rpc triggerBlockReport(.hadoop.hdfs.TriggerBlockReportRequestProto) returns (.hadoop.hdfs.TriggerBlockReportResponseProto);
*/
public abstract void triggerBlockReport(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto> done);
/**
 * rpc getBalancerBandwidth(.hadoop.hdfs.GetBalancerBandwidthRequestProto) returns (.hadoop.hdfs.GetBalancerBandwidthResponseProto);
 *
 * Returns the balancer bandwidth value of datanode.
 */
public abstract void getBalancerBandwidth(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto> done);
/**
 * rpc submitDiskBalancerPlan(.hadoop.hdfs.SubmitDiskBalancerPlanRequestProto) returns (.hadoop.hdfs.SubmitDiskBalancerPlanResponseProto);
 *
 * Submit a disk balancer plan for execution
 */
public abstract void submitDiskBalancerPlan(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto> done);
/**
 * rpc cancelDiskBalancerPlan(.hadoop.hdfs.CancelPlanRequestProto) returns (.hadoop.hdfs.CancelPlanResponseProto);
 *
 * Cancel an executing plan
 */
public abstract void cancelDiskBalancerPlan(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto> done);
/**
 * rpc queryDiskBalancerPlan(.hadoop.hdfs.QueryPlanStatusRequestProto) returns (.hadoop.hdfs.QueryPlanStatusResponseProto);
 *
 * Gets the status of an executing Plan
 */
public abstract void queryDiskBalancerPlan(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto> done);
/**
 * rpc getDiskBalancerSetting(.hadoop.hdfs.DiskBalancerSettingRequestProto) returns (.hadoop.hdfs.DiskBalancerSettingResponseProto);
 *
 * Gets run-time settings of Disk Balancer.
 */
public abstract void getDiskBalancerSetting(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto> done);
public static final
org.apache.hadoop.shaded.com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.getDescriptor().getServices().get(0);
}
public final org.apache.hadoop.shaded.com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptorForType() {
return getDescriptor();
}
public final void callMethod(
org.apache.hadoop.shaded.com.google.protobuf.Descriptors.MethodDescriptor method,
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.com.google.protobuf.Message request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<
org.apache.hadoop.shaded.com.google.protobuf.Message> done) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.callMethod() given method descriptor for wrong " +
"service type.");
}
switch(method.getIndex()) {
case 0:
this.getReplicaVisibleLength(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto)request,
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 1:
this.refreshNamenodes(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto)request,
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 2:
this.deleteBlockPool(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto)request,
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 3:
this.getBlockLocalPathInfo(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto)request,
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 4:
this.shutdownDatanode(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto)request,
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 5:
this.evictWriters(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto)request,
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 6:
this.getDatanodeInfo(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto)request,
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 7:
this.getVolumeReport(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto)request,
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 8:
this.getReconfigurationStatus(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusRequestProto)request,
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 9:
this.startReconfiguration(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto)request,
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 10:
this.listReconfigurableProperties(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesRequestProto)request,
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 11:
this.triggerBlockReport(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto)request,
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 12:
this.getBalancerBandwidth(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto)request,
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 13:
this.submitDiskBalancerPlan(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto)request,
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 14:
this.cancelDiskBalancerPlan(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto)request,
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 15:
this.queryDiskBalancerPlan(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto)request,
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 16:
this.getDiskBalancerSetting(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto)request,
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final org.apache.hadoop.shaded.com.google.protobuf.Message
getRequestPrototype(
org.apache.hadoop.shaded.com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getRequestPrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.getDefaultInstance();
case 1:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.getDefaultInstance();
case 2:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.getDefaultInstance();
case 3:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.getDefaultInstance();
case 4:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto.getDefaultInstance();
case 5:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto.getDefaultInstance();
case 6:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto.getDefaultInstance();
case 7:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto.getDefaultInstance();
case 8:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusRequestProto.getDefaultInstance();
case 9:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto.getDefaultInstance();
case 10:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesRequestProto.getDefaultInstance();
case 11:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto.getDefaultInstance();
case 12:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto.getDefaultInstance();
case 13:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto.getDefaultInstance();
case 14:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto.getDefaultInstance();
case 15:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto.getDefaultInstance();
case 16:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final org.apache.hadoop.shaded.com.google.protobuf.Message
getResponsePrototype(
org.apache.hadoop.shaded.com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getResponsePrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance();
case 1:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance();
case 2:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance();
case 3:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance();
case 4:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.getDefaultInstance();
case 5:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto.getDefaultInstance();
case 6:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.getDefaultInstance();
case 7:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto.getDefaultInstance();
case 8:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto.getDefaultInstance();
case 9:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationResponseProto.getDefaultInstance();
case 10:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto.getDefaultInstance();
case 11:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.getDefaultInstance();
case 12:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto.getDefaultInstance();
case 13:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto.getDefaultInstance();
case 14:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto.getDefaultInstance();
case 15:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto.getDefaultInstance();
case 16:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public static Stub newStub(
org.apache.hadoop.shaded.com.google.protobuf.RpcChannel channel) {
return new Stub(channel);
}
public static final class Stub extends org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService implements Interface {
private Stub(org.apache.hadoop.shaded.com.google.protobuf.RpcChannel channel) {
this.channel = channel;
}
private final org.apache.hadoop.shaded.com.google.protobuf.RpcChannel channel;
public org.apache.hadoop.shaded.com.google.protobuf.RpcChannel getChannel() {
return channel;
}
public void getReplicaVisibleLength(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(0),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance()));
}
public void refreshNamenodes(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(1),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance()));
}
public void deleteBlockPool(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(2),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance()));
}
public void getBlockLocalPathInfo(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(3),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance()));
}
public void shutdownDatanode(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(4),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.getDefaultInstance()));
}
public void evictWriters(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(5),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto.getDefaultInstance()));
}
public void getDatanodeInfo(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(6),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.getDefaultInstance()));
}
public void getVolumeReport(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(7),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto.getDefaultInstance()));
}
public void getReconfigurationStatus(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(8),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto.getDefaultInstance()));
}
public void startReconfiguration(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(9),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationResponseProto.getDefaultInstance()));
}
public void listReconfigurableProperties(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(10),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto.getDefaultInstance()));
}
public void triggerBlockReport(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(11),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.getDefaultInstance()));
}
public void getBalancerBandwidth(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(12),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto.getDefaultInstance()));
}
public void submitDiskBalancerPlan(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(13),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto.getDefaultInstance()));
}
public void cancelDiskBalancerPlan(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(14),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto.getDefaultInstance()));
}
public void queryDiskBalancerPlan(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(15),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto.getDefaultInstance()));
}
public void getDiskBalancerSetting(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto request,
org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(16),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto.getDefaultInstance()));
}
}
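/*
 * Usage sketch (not part of the generated API): Stub is the asynchronous client view; each method
 * forwards to channel.callMethod and delivers the response through an RpcCallback rather than a
 * return value. A minimal, hedged example, where 'channel' and 'controller' are placeholders for an
 * RpcChannel and RpcController supplied by your RPC transport:
 *
 *   ClientDatanodeProtocolService.Interface stub = ClientDatanodeProtocolService.newStub(channel);
 *   stub.getDatanodeInfo(controller,
 *       ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto.getDefaultInstance(),
 *       new org.apache.hadoop.shaded.com.google.protobuf.RpcCallback<ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto>() {
 *         public void run(ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto response) {
 *           // a null response typically signals failure; consult the controller for the error text
 *         }
 *       });
 */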
public static BlockingInterface newBlockingStub(
org.apache.hadoop.shaded.com.google.protobuf.BlockingRpcChannel channel) {
return new BlockingStub(channel);
}
public interface BlockingInterface {
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto getReplicaVisibleLength(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto refreshNamenodes(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto deleteBlockPool(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto getBlockLocalPathInfo(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto shutdownDatanode(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto evictWriters(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto getDatanodeInfo(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto getVolumeReport(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto getReconfigurationStatus(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationResponseProto startReconfiguration(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto listReconfigurableProperties(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto triggerBlockReport(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto getBalancerBandwidth(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto submitDiskBalancerPlan(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto cancelDiskBalancerPlan(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto queryDiskBalancerPlan(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto getDiskBalancerSetting(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException;
}
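/*
 * Usage sketch (not part of the generated API): BlockingInterface is the synchronous client view of
 * the same service; newBlockingStub(channel) above returns the BlockingStub defined just below,
 * which delegates every call to channel.callBlockingMethod. A minimal, hedged example, where
 * 'blockingChannel', 'controller' and 'extendedBlock' are placeholders supplied by your transport
 * and caller:
 *
 *   BlockingInterface proxy = ClientDatanodeProtocolService.newBlockingStub(blockingChannel);
 *   ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto resp =
 *       proxy.getReplicaVisibleLength(controller,
 *           ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.newBuilder()
 *               .setBlock(extendedBlock)
 *               .build());
 *   // resp carries the visible length (a 'length' field is assumed here, per the proto definition)
 */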
private static final class BlockingStub implements BlockingInterface {
private BlockingStub(org.apache.hadoop.shaded.com.google.protobuf.BlockingRpcChannel channel) {
this.channel = channel;
}
private final org.apache.hadoop.shaded.com.google.protobuf.BlockingRpcChannel channel;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto getReplicaVisibleLength(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(0),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto refreshNamenodes(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(1),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto deleteBlockPool(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(2),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto getBlockLocalPathInfo(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(3),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto shutdownDatanode(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(4),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto evictWriters(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(5),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto getDatanodeInfo(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(6),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto getVolumeReport(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(7),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto getReconfigurationStatus(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(8),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationResponseProto startReconfiguration(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(9),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto listReconfigurableProperties(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(10),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto triggerBlockReport(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(11),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto getBalancerBandwidth(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(12),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto submitDiskBalancerPlan(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(13),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto cancelDiskBalancerPlan(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(14),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto queryDiskBalancerPlan(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(15),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto getDiskBalancerSetting(
org.apache.hadoop.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto request)
throws org.apache.hadoop.shaded.com.google.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(16),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto.getDefaultInstance());
}
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ClientDatanodeProtocolService)
}
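// A minimal, illustrative sketch of how the blocking stub above is typically used; the channel
// construction is an assumption (in Hadoop it is normally supplied by the RPC engine rather than
// built by hand), and the chosen method is just an example:
//
//   org.apache.hadoop.shaded.com.google.protobuf.BlockingRpcChannel channel =
//       /* obtained from the RPC layer */ null;
//   ClientDatanodeProtocolService.BlockingInterface proxy =
//       ClientDatanodeProtocolService.newBlockingStub(channel);
//   GetDatanodeInfoResponseProto info =
//       proxy.getDatanodeInfo(null, GetDatanodeInfoRequestProto.getDefaultInstance());
//
// Each stub method simply forwards to channel.callBlockingMethod() with the method descriptor
// whose index matches the order of the rpc declarations in ClientDatanodeProtocol.proto
// (e.g. index 4 for shutdownDatanode, index 16 for getDiskBalancerSetting).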
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_descriptor;
private static
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_fieldAccessorTable;
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_descriptor;
private static
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_fieldAccessorTable;
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_descriptor;
private static
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_fieldAccessorTable;
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_descriptor;
private static
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_fieldAccessorTable;
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_descriptor;
private static
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_fieldAccessorTable;
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_descriptor;
private static
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_fieldAccessorTable;
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_descriptor;
private static
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_fieldAccessorTable;
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_descriptor;
private static
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_fieldAccessorTable;
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_descriptor;
private static
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_fieldAccessorTable;
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_descriptor;
private static
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_fieldAccessorTable;
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_EvictWritersRequestProto_descriptor;
private static
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_EvictWritersRequestProto_fieldAccessorTable;
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_EvictWritersResponseProto_descriptor;
private static
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_EvictWritersResponseProto_fieldAccessorTable;
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_descriptor;
private static
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_fieldAccessorTable;
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_descriptor;
private static
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_fieldAccessorTable;
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetVolumeReportRequestProto_descriptor;
private static
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_GetVolumeReportRequestProto_fieldAccessorTable;
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetVolumeReportResponseProto_descriptor;
private static
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_GetVolumeReportResponseProto_fieldAccessorTable;
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_descriptor;
private static
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_fieldAccessorTable;
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_descriptor;
private static
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_fieldAccessorTable;
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetBalancerBandwidthRequestProto_descriptor;
private static
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_GetBalancerBandwidthRequestProto_fieldAccessorTable;
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetBalancerBandwidthResponseProto_descriptor;
private static
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_GetBalancerBandwidthResponseProto_fieldAccessorTable;
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_SubmitDiskBalancerPlanRequestProto_descriptor;
private static
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_SubmitDiskBalancerPlanRequestProto_fieldAccessorTable;
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_SubmitDiskBalancerPlanResponseProto_descriptor;
private static
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_SubmitDiskBalancerPlanResponseProto_fieldAccessorTable;
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_CancelPlanRequestProto_descriptor;
private static
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_CancelPlanRequestProto_fieldAccessorTable;
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_CancelPlanResponseProto_descriptor;
private static
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_CancelPlanResponseProto_fieldAccessorTable;
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_QueryPlanStatusRequestProto_descriptor;
private static
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_QueryPlanStatusRequestProto_fieldAccessorTable;
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_QueryPlanStatusResponseProto_descriptor;
private static
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_QueryPlanStatusResponseProto_fieldAccessorTable;
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_DiskBalancerSettingRequestProto_descriptor;
private static
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_DiskBalancerSettingRequestProto_fieldAccessorTable;
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_DiskBalancerSettingResponseProto_descriptor;
private static
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_DiskBalancerSettingResponseProto_fieldAccessorTable;
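// Each message type defined in ClientDatanodeProtocol.proto is paired above with a Descriptor and
// a GeneratedMessage.FieldAccessorTable; both are populated, in declaration order, by the
// InternalDescriptorAssigner in the static initializer below once the file descriptor is built.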
public static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
return descriptor;
}
private static org.apache.hadoop.shaded.com.google.protobuf.Descriptors.FileDescriptor
descriptor;
static {
java.lang.String[] descriptorData = {
"\n\034ClientDatanodeProtocol.proto\022\013hadoop.h" +
"dfs\032\016Security.proto\032\nhdfs.proto\032\035Reconfi" +
"gurationProtocol.proto\"U\n#GetReplicaVisi" +
"bleLengthRequestProto\022.\n\005block\030\001 \002(\0132\037.h" +
"adoop.hdfs.ExtendedBlockProto\"6\n$GetRepl" +
"icaVisibleLengthResponseProto\022\016\n\006length\030" +
"\001 \002(\004\"\036\n\034RefreshNamenodesRequestProto\"\037\n" +
"\035RefreshNamenodesResponseProto\"?\n\033Delete" +
"BlockPoolRequestProto\022\021\n\tblockPool\030\001 \002(\t" +
"\022\r\n\005force\030\002 \002(\010\"\036\n\034DeleteBlockPoolRespon",
"seProto\"}\n!GetBlockLocalPathInfoRequestP" +
"roto\022.\n\005block\030\001 \002(\0132\037.hadoop.hdfs.Extend" +
"edBlockProto\022(\n\005token\030\002 \002(\0132\031.hadoop.org.apache.hadoop.shaded.com. +
"mon.TokenProto\"~\n\"GetBlockLocalPathInfoR" +
"esponseProto\022.\n\005block\030\001 \002(\0132\037.hadoop.hdf" +
"s.ExtendedBlockProto\022\021\n\tlocalPath\030\002 \002(\t\022" +
"\025\n\rlocalMetaPath\030\003 \002(\t\"2\n\034ShutdownDatano" +
"deRequestProto\022\022\n\nforUpgrade\030\001 \002(\010\"\037\n\035Sh" +
"utdownDatanodeResponseProto\"\032\n\030EvictWrit" +
"ersRequestProto\"\033\n\031EvictWritersResponseP",
"roto\"\035\n\033GetDatanodeInfoRequestProto\"V\n\034G" +
"etDatanodeInfoResponseProto\0226\n\tlocalInfo" +
"\030\001 \002(\0132#.hadoop.hdfs.DatanodeLocalInfoPr" +
"oto\"\035\n\033GetVolumeReportRequestProto\"X\n\034Ge" +
"tVolumeReportResponseProto\0228\n\nvolumeInfo" +
"\030\001 \003(\0132$.hadoop.hdfs.DatanodeVolumeInfoP" +
"roto\"H\n\036TriggerBlockReportRequestProto\022\023" +
"\n\013incremental\030\001 \002(\010\022\021\n\tnnAddress\030\002 \001(\t\"!" +
"\n\037TriggerBlockReportResponseProto\"\"\n Get" +
"BalancerBandwidthRequestProto\"6\n!GetBala",
"ncerBandwidthResponseProto\022\021\n\tbandwidth\030" +
"\001 \002(\004\"\202\001\n\"SubmitDiskBalancerPlanRequestP" +
"roto\022\016\n\006planID\030\001 \002(\t\022\014\n\004plan\030\002 \002(\t\022\023\n\013pl" +
"anVersion\030\003 \001(\004\022\027\n\017ignoreDateCheck\030\004 \001(\010" +
"\022\020\n\010planFile\030\005 \002(\t\"%\n#SubmitDiskBalancer" +
"PlanResponseProto\"(\n\026CancelPlanRequestPr" +
"oto\022\016\n\006planID\030\001 \002(\t\"\031\n\027CancelPlanRespons" +
"eProto\"\035\n\033QueryPlanStatusRequestProto\"g\n" +
"\034QueryPlanStatusResponseProto\022\016\n\006result\030" +
"\001 \001(\r\022\016\n\006planID\030\002 \001(\t\022\025\n\rcurrentStatus\030\003",
" \001(\t\022\020\n\010planFile\030\004 \001(\t\".\n\037DiskBalancerSe" +
"ttingRequestProto\022\013\n\003key\030\001 \002(\t\"1\n DiskBa" +
"lancerSettingResponseProto\022\r\n\005value\030\001 \002(" +
"\t2\300\017\n\035ClientDatanodeProtocolService\022~\n\027g" +
"etReplicaVisibleLength\0220.hadoop.hdfs.Get" +
"ReplicaVisibleLengthRequestProto\0321.hadoo" +
"p.hdfs.GetReplicaVisibleLengthResponsePr" +
"oto\022i\n\020refreshNamenodes\022).hadoop.hdfs.Re" +
"freshNamenodesRequestProto\032*.hadoop.hdfs" +
".RefreshNamenodesResponseProto\022f\n\017delete",
"BlockPool\022(.hadoop.hdfs.DeleteBlockPoolR" +
"equestProto\032).hadoop.hdfs.DeleteBlockPoo" +
"lResponseProto\022x\n\025getBlockLocalPathInfo\022" +
"..hadoop.hdfs.GetBlockLocalPathInfoReque" +
"stProto\032/.hadoop.hdfs.GetBlockLocalPathI" +
"nfoResponseProto\022i\n\020shutdownDatanode\022).h" +
"adoop.hdfs.ShutdownDatanodeRequestProto\032" +
"*.hadoop.hdfs.ShutdownDatanodeResponsePr" +
"oto\022]\n\014evictWriters\022%.hadoop.hdfs.EvictW" +
"ritersRequestProto\032&.hadoop.hdfs.EvictWr",
"itersResponseProto\022f\n\017getDatanodeInfo\022(." +
"hadoop.hdfs.GetDatanodeInfoRequestProto\032" +
").hadoop.hdfs.GetDatanodeInfoResponsePro" +
"to\022f\n\017getVolumeReport\022(.hadoop.hdfs.GetV" +
"olumeReportRequestProto\032).hadoop.hdfs.Ge" +
"tVolumeReportResponseProto\022\201\001\n\030getReconf" +
"igurationStatus\0221.hadoop.hdfs.GetReconfi" +
"gurationStatusRequestProto\0322.hadoop.hdfs" +
".GetReconfigurationStatusResponseProto\022u" +
"\n\024startReconfiguration\022-.hadoop.hdfs.Sta",
"rtReconfigurationRequestProto\032..hadoop.h" +
"dfs.StartReconfigurationResponseProto\022\215\001" +
"\n\034listReconfigurableProperties\0225.hadoop." +
"hdfs.ListReconfigurablePropertiesRequest" +
"Proto\0326.hadoop.hdfs.ListReconfigurablePr" +
"opertiesResponseProto\022o\n\022triggerBlockRep" +
"ort\022+.hadoop.hdfs.TriggerBlockReportRequ" +
"estProto\032,.hadoop.hdfs.TriggerBlockRepor" +
"tResponseProto\022u\n\024getBalancerBandwidth\022-" +
".hadoop.hdfs.GetBalancerBandwidthRequest",
"Proto\032..hadoop.hdfs.GetBalancerBandwidth" +
"ResponseProto\022{\n\026submitDiskBalancerPlan\022" +
"/.hadoop.hdfs.SubmitDiskBalancerPlanRequ" +
"estProto\0320.hadoop.hdfs.SubmitDiskBalance" +
"rPlanResponseProto\022c\n\026cancelDiskBalancer" +
"Plan\022#.hadoop.hdfs.CancelPlanRequestProt" +
"o\032$.hadoop.hdfs.CancelPlanResponseProto\022" +
"l\n\025queryDiskBalancerPlan\022(.hadoop.hdfs.Q" +
"ueryPlanStatusRequestProto\032).hadoop.hdfs" +
".QueryPlanStatusResponseProto\022u\n\026getDisk",
"BalancerSetting\022,.hadoop.hdfs.DiskBalanc" +
"erSettingRequestProto\032-.hadoop.hdfs.Disk" +
"BalancerSettingResponseProtoBK\n%org.apache.hadoop.shaded.org.apac" +
"he.hadoop.hdfs.protocol.protoB\034ClientDat" +
"anodeProtocolProtos\210\001\001\240\001\001"
};
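// descriptorData above is the serialized FileDescriptorProto of ClientDatanodeProtocol.proto,
// emitted as escaped string chunks (each array element is a run of concatenated literals, which
// protobuf decodes as ISO-8859-1 bytes). internalBuildGeneratedFileFrom() below reassembles the
// bytes and resolves the imports against the already-built descriptors of Security.proto,
// hdfs.proto and ReconfigurationProtocol.proto before invoking the assigner.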
org.apache.hadoop.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new org.apache.hadoop.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
public org.apache.hadoop.shaded.com.google.protobuf.ExtensionRegistry assignDescriptors(
org.apache.hadoop.shaded.com.google.protobuf.Descriptors.FileDescriptor root) {
descriptor = root;
internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_descriptor =
getDescriptor().getMessageTypes().get(0);
internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_fieldAccessorTable = new
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_descriptor,
new java.lang.String[] { "Block", });
internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_descriptor =
getDescriptor().getMessageTypes().get(1);
internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_fieldAccessorTable = new
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_descriptor,
new java.lang.String[] { "Length", });
internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_descriptor =
getDescriptor().getMessageTypes().get(2);
internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_fieldAccessorTable = new
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_descriptor =
getDescriptor().getMessageTypes().get(3);
internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_fieldAccessorTable = new
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_descriptor =
getDescriptor().getMessageTypes().get(4);
internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_fieldAccessorTable = new
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_descriptor,
new java.lang.String[] { "BlockPool", "Force", });
internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_descriptor =
getDescriptor().getMessageTypes().get(5);
internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_fieldAccessorTable = new
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_descriptor =
getDescriptor().getMessageTypes().get(6);
internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_fieldAccessorTable = new
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_descriptor,
new java.lang.String[] { "Block", "Token", });
internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_descriptor =
getDescriptor().getMessageTypes().get(7);
internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_fieldAccessorTable = new
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_descriptor,
new java.lang.String[] { "Block", "LocalPath", "LocalMetaPath", });
internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_descriptor =
getDescriptor().getMessageTypes().get(8);
internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_fieldAccessorTable = new
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_descriptor,
new java.lang.String[] { "ForUpgrade", });
internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_descriptor =
getDescriptor().getMessageTypes().get(9);
internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_fieldAccessorTable = new
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_EvictWritersRequestProto_descriptor =
getDescriptor().getMessageTypes().get(10);
internal_static_hadoop_hdfs_EvictWritersRequestProto_fieldAccessorTable = new
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_EvictWritersRequestProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_EvictWritersResponseProto_descriptor =
getDescriptor().getMessageTypes().get(11);
internal_static_hadoop_hdfs_EvictWritersResponseProto_fieldAccessorTable = new
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_EvictWritersResponseProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_descriptor =
getDescriptor().getMessageTypes().get(12);
internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_fieldAccessorTable = new
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_descriptor =
getDescriptor().getMessageTypes().get(13);
internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_fieldAccessorTable = new
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_descriptor,
new java.lang.String[] { "LocalInfo", });
internal_static_hadoop_hdfs_GetVolumeReportRequestProto_descriptor =
getDescriptor().getMessageTypes().get(14);
internal_static_hadoop_hdfs_GetVolumeReportRequestProto_fieldAccessorTable = new
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_GetVolumeReportRequestProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_GetVolumeReportResponseProto_descriptor =
getDescriptor().getMessageTypes().get(15);
internal_static_hadoop_hdfs_GetVolumeReportResponseProto_fieldAccessorTable = new
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_GetVolumeReportResponseProto_descriptor,
new java.lang.String[] { "VolumeInfo", });
internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_descriptor =
getDescriptor().getMessageTypes().get(16);
internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_fieldAccessorTable = new
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_descriptor,
new java.lang.String[] { "Incremental", "NnAddress", });
internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_descriptor =
getDescriptor().getMessageTypes().get(17);
internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_fieldAccessorTable = new
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_GetBalancerBandwidthRequestProto_descriptor =
getDescriptor().getMessageTypes().get(18);
internal_static_hadoop_hdfs_GetBalancerBandwidthRequestProto_fieldAccessorTable = new
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_GetBalancerBandwidthRequestProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_GetBalancerBandwidthResponseProto_descriptor =
getDescriptor().getMessageTypes().get(19);
internal_static_hadoop_hdfs_GetBalancerBandwidthResponseProto_fieldAccessorTable = new
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_GetBalancerBandwidthResponseProto_descriptor,
new java.lang.String[] { "Bandwidth", });
internal_static_hadoop_hdfs_SubmitDiskBalancerPlanRequestProto_descriptor =
getDescriptor().getMessageTypes().get(20);
internal_static_hadoop_hdfs_SubmitDiskBalancerPlanRequestProto_fieldAccessorTable = new
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_SubmitDiskBalancerPlanRequestProto_descriptor,
new java.lang.String[] { "PlanID", "Plan", "PlanVersion", "IgnoreDateCheck", "PlanFile", });
internal_static_hadoop_hdfs_SubmitDiskBalancerPlanResponseProto_descriptor =
getDescriptor().getMessageTypes().get(21);
internal_static_hadoop_hdfs_SubmitDiskBalancerPlanResponseProto_fieldAccessorTable = new
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_SubmitDiskBalancerPlanResponseProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_CancelPlanRequestProto_descriptor =
getDescriptor().getMessageTypes().get(22);
internal_static_hadoop_hdfs_CancelPlanRequestProto_fieldAccessorTable = new
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_CancelPlanRequestProto_descriptor,
new java.lang.String[] { "PlanID", });
internal_static_hadoop_hdfs_CancelPlanResponseProto_descriptor =
getDescriptor().getMessageTypes().get(23);
internal_static_hadoop_hdfs_CancelPlanResponseProto_fieldAccessorTable = new
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_CancelPlanResponseProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_QueryPlanStatusRequestProto_descriptor =
getDescriptor().getMessageTypes().get(24);
internal_static_hadoop_hdfs_QueryPlanStatusRequestProto_fieldAccessorTable = new
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_QueryPlanStatusRequestProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_QueryPlanStatusResponseProto_descriptor =
getDescriptor().getMessageTypes().get(25);
internal_static_hadoop_hdfs_QueryPlanStatusResponseProto_fieldAccessorTable = new
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_QueryPlanStatusResponseProto_descriptor,
new java.lang.String[] { "Result", "PlanID", "CurrentStatus", "PlanFile", });
internal_static_hadoop_hdfs_DiskBalancerSettingRequestProto_descriptor =
getDescriptor().getMessageTypes().get(26);
internal_static_hadoop_hdfs_DiskBalancerSettingRequestProto_fieldAccessorTable = new
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_DiskBalancerSettingRequestProto_descriptor,
new java.lang.String[] { "Key", });
internal_static_hadoop_hdfs_DiskBalancerSettingResponseProto_descriptor =
getDescriptor().getMessageTypes().get(27);
internal_static_hadoop_hdfs_DiskBalancerSettingResponseProto_fieldAccessorTable = new
org.apache.hadoop.shaded.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_DiskBalancerSettingResponseProto_descriptor,
new java.lang.String[] { "Value", });
return null;
}
};
org.apache.hadoop.shaded.com.google.protobuf.Descriptors.FileDescriptor
.internalBuildGeneratedFileFrom(descriptorData,
new org.apache.hadoop.shaded.com.google.protobuf.Descriptors.FileDescriptor[] {
org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.getDescriptor(),
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.getDescriptor(),
}, assigner);
}
// @@protoc_insertion_point(outer_class_scope)
}