// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: ClientDatanodeProtocol.proto
package org.apache.hadoop.hdfs.protocol.proto;
public final class ClientDatanodeProtocolProtos {
private ClientDatanodeProtocolProtos() {}
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
}
public interface GetReplicaVisibleLengthRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.ExtendedBlockProto block = 1;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
boolean hasBlock();
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock();
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder();
}
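// Note (illustrative, not generated output): both GetReplicaVisibleLengthRequestProto
// and its Builder implement this interface, so code can read fields from either a
// built message or a live builder. A hypothetical helper sketching that use:
//
//   static boolean hasUsableBlock(GetReplicaVisibleLengthRequestProtoOrBuilder r) {
//     return r.hasBlock() && r.getBlock().isInitialized();
//   }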
/**
 * Protobuf type {@code hadoop.hdfs.GetReplicaVisibleLengthRequestProto}
 *
 * <pre>
 **
 * block - block for which visible length is requested
 * </pre>
 */
public static final class GetReplicaVisibleLengthRequestProto extends
com.google.protobuf.GeneratedMessage
implements GetReplicaVisibleLengthRequestProtoOrBuilder {
// Use GetReplicaVisibleLengthRequestProto.newBuilder() to construct.
private GetReplicaVisibleLengthRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private GetReplicaVisibleLengthRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final GetReplicaVisibleLengthRequestProto defaultInstance;
public static GetReplicaVisibleLengthRequestProto getDefaultInstance() {
return defaultInstance;
}
public GetReplicaVisibleLengthRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetReplicaVisibleLengthRequestProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = block_.toBuilder();
}
block_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(block_);
block_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.Builder.class);
}
public static com.google.protobuf.Parser<GetReplicaVisibleLengthRequestProto> PARSER =
    new com.google.protobuf.AbstractParser<GetReplicaVisibleLengthRequestProto>() {
public GetReplicaVisibleLengthRequestProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new GetReplicaVisibleLengthRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<GetReplicaVisibleLengthRequestProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.ExtendedBlockProto block = 1;
public static final int BLOCK_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
return block_;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
return block_;
}
private void initFields() {
block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasBlock()) {
memoizedIsInitialized = 0;
return false;
}
if (!getBlock().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, block_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, block_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto) obj;
boolean result = true;
result = result && (hasBlock() == other.hasBlock());
if (hasBlock()) {
result = result && getBlock()
.equals(other.getBlock());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasBlock()) {
hash = (37 * hash) + BLOCK_FIELD_NUMBER;
hash = (53 * hash) + getBlock().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Protobuf type {@code hadoop.hdfs.GetReplicaVisibleLengthRequestProto}
 *
 * <pre>
 **
 * block - block for which visible length is requested
 * </pre>
 */
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getBlockFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (blockBuilder_ == null) {
block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (blockBuilder_ == null) {
result.block_ = block_;
} else {
result.block_ = blockBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.getDefaultInstance()) return this;
if (other.hasBlock()) {
mergeBlock(other.getBlock());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasBlock()) {
return false;
}
if (!getBlock().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.ExtendedBlockProto block = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
if (blockBuilder_ == null) {
return block_;
} else {
return blockBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
if (blockBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
block_ = value;
onChanged();
} else {
blockBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder setBlock(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) {
if (blockBuilder_ == null) {
block_ = builderForValue.build();
onChanged();
} else {
blockBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
if (blockBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) {
block_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
} else {
block_ = value;
}
onChanged();
} else {
blockBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder clearBlock() {
if (blockBuilder_ == null) {
block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
onChanged();
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getBlockFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
if (blockBuilder_ != null) {
return blockBuilder_.getMessageOrBuilder();
} else {
return block_;
}
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>
getBlockFieldBuilder() {
if (blockBuilder_ == null) {
blockBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>(
block_,
getParentForChildren(),
isClean());
block_ = null;
}
return blockBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetReplicaVisibleLengthRequestProto)
}
static {
defaultInstance = new GetReplicaVisibleLengthRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetReplicaVisibleLengthRequestProto)
}
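// Usage sketch (illustrative only, not part of the generated file): building a
// request around an ExtendedBlockProto and round-tripping it through bytes. The
// pool id, block id, and generation stamp below are placeholder values.
//
//   HdfsProtos.ExtendedBlockProto block = HdfsProtos.ExtendedBlockProto.newBuilder()
//       .setPoolId("BP-example")     // placeholder block pool id
//       .setBlockId(1073741825L)     // placeholder block id
//       .setGenerationStamp(1001L)   // placeholder generation stamp
//       .build();
//   GetReplicaVisibleLengthRequestProto req =
//       GetReplicaVisibleLengthRequestProto.newBuilder().setBlock(block).build();
//   byte[] wire = req.toByteArray();
//   GetReplicaVisibleLengthRequestProto parsed =
//       GetReplicaVisibleLengthRequestProto.parseFrom(wire);
//
// Because `block` is a required field, build() would throw an
// UninitializedMessageException if setBlock() were skipped.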
public interface GetReplicaVisibleLengthResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required uint64 length = 1;
/**
* required uint64 length = 1;
*/
boolean hasLength();
/**
* required uint64 length = 1;
*/
long getLength();
}
/**
 * Protobuf type {@code hadoop.hdfs.GetReplicaVisibleLengthResponseProto}
 *
 * <pre>
 **
 * length - visible length of the block
 * </pre>
 */
public static final class GetReplicaVisibleLengthResponseProto extends
com.google.protobuf.GeneratedMessage
implements GetReplicaVisibleLengthResponseProtoOrBuilder {
// Use GetReplicaVisibleLengthResponseProto.newBuilder() to construct.
private GetReplicaVisibleLengthResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private GetReplicaVisibleLengthResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final GetReplicaVisibleLengthResponseProto defaultInstance;
public static GetReplicaVisibleLengthResponseProto getDefaultInstance() {
return defaultInstance;
}
public GetReplicaVisibleLengthResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetReplicaVisibleLengthResponseProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
length_ = input.readUInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.Builder.class);
}
public static com.google.protobuf.Parser<GetReplicaVisibleLengthResponseProto> PARSER =
    new com.google.protobuf.AbstractParser<GetReplicaVisibleLengthResponseProto>() {
public GetReplicaVisibleLengthResponseProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new GetReplicaVisibleLengthResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<GetReplicaVisibleLengthResponseProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required uint64 length = 1;
public static final int LENGTH_FIELD_NUMBER = 1;
private long length_;
/**
* required uint64 length = 1;
*/
public boolean hasLength() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint64 length = 1;
*/
public long getLength() {
return length_;
}
private void initFields() {
length_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasLength()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt64(1, length_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(1, length_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto) obj;
boolean result = true;
result = result && (hasLength() == other.hasLength());
if (hasLength()) {
result = result && (getLength()
== other.getLength());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasLength()) {
hash = (37 * hash) + LENGTH_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getLength());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Protobuf type {@code hadoop.hdfs.GetReplicaVisibleLengthResponseProto}
 *
 * <pre>
 **
 * length - visible length of the block
 * </pre>
 */
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
length_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.length_ = length_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance()) return this;
if (other.hasLength()) {
setLength(other.getLength());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasLength()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required uint64 length = 1;
private long length_ ;
/**
* required uint64 length = 1;
*/
public boolean hasLength() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint64 length = 1;
*/
public long getLength() {
return length_;
}
/**
* required uint64 length = 1;
*/
public Builder setLength(long value) {
bitField0_ |= 0x00000001;
length_ = value;
onChanged();
return this;
}
/**
* required uint64 length = 1;
*/
public Builder clearLength() {
bitField0_ = (bitField0_ & ~0x00000001);
length_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetReplicaVisibleLengthResponseProto)
}
static {
defaultInstance = new GetReplicaVisibleLengthResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetReplicaVisibleLengthResponseProto)
}
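// Usage sketch (illustrative only): a datanode-side handler would populate the
// response and a client reads the visible length back. The length value is a
// placeholder; `length` is a required uint64, so isInitialized() stays false
// until it is set.
//
//   GetReplicaVisibleLengthResponseProto resp =
//       GetReplicaVisibleLengthResponseProto.newBuilder()
//           .setLength(134217728L)   // placeholder: 128 MB visible
//           .build();                // build() throws if length were unset
//   long visible = resp.getLength();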
public interface RefreshNamenodesRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
}
/**
 * Protobuf type {@code hadoop.hdfs.RefreshNamenodesRequestProto}
 *
 * <pre>
 **
 * void request
 * </pre>
 */
public static final class RefreshNamenodesRequestProto extends
com.google.protobuf.GeneratedMessage
implements RefreshNamenodesRequestProtoOrBuilder {
// Use RefreshNamenodesRequestProto.newBuilder() to construct.
private RefreshNamenodesRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private RefreshNamenodesRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final RefreshNamenodesRequestProto defaultInstance;
public static RefreshNamenodesRequestProto getDefaultInstance() {
return defaultInstance;
}
public RefreshNamenodesRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private RefreshNamenodesRequestProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.Builder.class);
}
public static com.google.protobuf.Parser<RefreshNamenodesRequestProto> PARSER =
    new com.google.protobuf.AbstractParser<RefreshNamenodesRequestProto>() {
public RefreshNamenodesRequestProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new RefreshNamenodesRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<RefreshNamenodesRequestProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Protobuf type {@code hadoop.hdfs.RefreshNamenodesRequestProto}
 *
 * <pre>
 **
 * void request
 * </pre>
 */
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.RefreshNamenodesRequestProto)
}
static {
defaultInstance = new RefreshNamenodesRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.RefreshNamenodesRequestProto)
}
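// Usage sketch (illustrative only): RefreshNamenodesRequestProto carries no
// fields, so the shared default instance can stand in for a "void" argument;
// length-delimited framing is what lets an empty message travel on a stream.
// `out` and `in` are assumed java.io streams, not names from this file.
//
//   RefreshNamenodesRequestProto req = RefreshNamenodesRequestProto.getDefaultInstance();
//   req.writeDelimitedTo(out);   // writes a varint length prefix, then the (empty) body
//   RefreshNamenodesRequestProto back =
//       RefreshNamenodesRequestProto.parseDelimitedFrom(in);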
public interface RefreshNamenodesResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
}
/**
 * Protobuf type {@code hadoop.hdfs.RefreshNamenodesResponseProto}
 *
 * <pre>
 **
 * void response
 * </pre>
 */
public static final class RefreshNamenodesResponseProto extends
com.google.protobuf.GeneratedMessage
implements RefreshNamenodesResponseProtoOrBuilder {
// Use RefreshNamenodesResponseProto.newBuilder() to construct.
private RefreshNamenodesResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private RefreshNamenodesResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final RefreshNamenodesResponseProto defaultInstance;
public static RefreshNamenodesResponseProto getDefaultInstance() {
return defaultInstance;
}
public RefreshNamenodesResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private RefreshNamenodesResponseProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.Builder.class);
}
public static com.google.protobuf.Parser<RefreshNamenodesResponseProto> PARSER =
    new com.google.protobuf.AbstractParser<RefreshNamenodesResponseProto>() {
public RefreshNamenodesResponseProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new RefreshNamenodesResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<RefreshNamenodesResponseProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Protobuf type {@code hadoop.hdfs.RefreshNamenodesResponseProto}
 *
 * <pre>
 **
 * void response
 * </pre>
 */
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.RefreshNamenodesResponseProto)
}
static {
defaultInstance = new RefreshNamenodesResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.RefreshNamenodesResponseProto)
}
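// Editorial sketch, not part of the protoc output: RefreshNamenodesResponseProto
// has no fields, so callers typically just parse it to confirm the RPC returned.
// Assuming a hypothetical java.io.InputStream "in" supplied by the RPC layer:
//
//   RefreshNamenodesResponseProto resp =
//       RefreshNamenodesResponseProto.parseDelimitedFrom(in);
//   // Apart from unknown fields, every instance equals the default instance.
//   boolean plain = resp.equals(RefreshNamenodesResponseProto.getDefaultInstance());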
public interface DeleteBlockPoolRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required string blockPool = 1;
/**
* required string blockPool = 1;
*/
boolean hasBlockPool();
/**
* required string blockPool = 1;
*/
java.lang.String getBlockPool();
/**
* required string blockPool = 1;
*/
com.google.protobuf.ByteString
getBlockPoolBytes();
// required bool force = 2;
/**
* required bool force = 2;
*/
boolean hasForce();
/**
* required bool force = 2;
*/
boolean getForce();
}
/**
 * Protobuf type {@code hadoop.hdfs.DeleteBlockPoolRequestProto}
 *
 * <pre>
 **
 * blockPool - block pool to be deleted
 * force - if false, delete the block pool only if it is empty.
 *         if true, delete the block pool even if it has blocks.
 * </pre>
 */
public static final class DeleteBlockPoolRequestProto extends
com.google.protobuf.GeneratedMessage
implements DeleteBlockPoolRequestProtoOrBuilder {
// Use DeleteBlockPoolRequestProto.newBuilder() to construct.
private DeleteBlockPoolRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private DeleteBlockPoolRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final DeleteBlockPoolRequestProto defaultInstance;
public static DeleteBlockPoolRequestProto getDefaultInstance() {
return defaultInstance;
}
public DeleteBlockPoolRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DeleteBlockPoolRequestProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
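// Editorial note: a protobuf tag is (fieldNumber << 3) | wireType, so tag 10
// below is field 1 with wire type 2 (length-delimited, the blockPool string)
// and tag 16 is field 2 with wire type 0 (varint, the force bool).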
case 10: {
bitField0_ |= 0x00000001;
blockPool_ = input.readBytes();
break;
}
case 16: {
bitField0_ |= 0x00000002;
force_ = input.readBool();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.Builder.class);
}
public static com.google.protobuf.Parser<DeleteBlockPoolRequestProto> PARSER =
new com.google.protobuf.AbstractParser<DeleteBlockPoolRequestProto>() {
public DeleteBlockPoolRequestProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new DeleteBlockPoolRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<DeleteBlockPoolRequestProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required string blockPool = 1;
public static final int BLOCKPOOL_FIELD_NUMBER = 1;
private java.lang.Object blockPool_;
/**
* required string blockPool = 1;
*/
public boolean hasBlockPool() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string blockPool = 1;
*/
public java.lang.String getBlockPool() {
java.lang.Object ref = blockPool_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
blockPool_ = s;
}
return s;
}
}
/**
* required string blockPool = 1;
*/
public com.google.protobuf.ByteString
getBlockPoolBytes() {
java.lang.Object ref = blockPool_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPool_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required bool force = 2;
public static final int FORCE_FIELD_NUMBER = 2;
private boolean force_;
/**
* required bool force = 2;
*/
public boolean hasForce() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required bool force = 2;
*/
public boolean getForce() {
return force_;
}
private void initFields() {
blockPool_ = "";
force_ = false;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasBlockPool()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasForce()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getBlockPoolBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBool(2, force_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getBlockPoolBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(2, force_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto) obj;
boolean result = true;
result = result && (hasBlockPool() == other.hasBlockPool());
if (hasBlockPool()) {
result = result && getBlockPool()
.equals(other.getBlockPool());
}
result = result && (hasForce() == other.hasForce());
if (hasForce()) {
result = result && (getForce()
== other.getForce());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasBlockPool()) {
hash = (37 * hash) + BLOCKPOOL_FIELD_NUMBER;
hash = (53 * hash) + getBlockPool().hashCode();
}
if (hasForce()) {
hash = (37 * hash) + FORCE_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getForce());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Protobuf type {@code hadoop.hdfs.DeleteBlockPoolRequestProto}
 *
 * <pre>
 **
 * blockPool - block pool to be deleted
 * force - if false, delete the block pool only if it is empty.
 *         if true, delete the block pool even if it has blocks.
 * </pre>
 */
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
blockPool_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
force_ = false;
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.blockPool_ = blockPool_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.force_ = force_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.getDefaultInstance()) return this;
if (other.hasBlockPool()) {
bitField0_ |= 0x00000001;
blockPool_ = other.blockPool_;
onChanged();
}
if (other.hasForce()) {
setForce(other.getForce());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasBlockPool()) {
return false;
}
if (!hasForce()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string blockPool = 1;
private java.lang.Object blockPool_ = "";
/**
* required string blockPool = 1;
*/
public boolean hasBlockPool() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string blockPool = 1;
*/
public java.lang.String getBlockPool() {
java.lang.Object ref = blockPool_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
blockPool_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string blockPool = 1;
*/
public com.google.protobuf.ByteString
getBlockPoolBytes() {
java.lang.Object ref = blockPool_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPool_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string blockPool = 1;
*/
public Builder setBlockPool(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
blockPool_ = value;
onChanged();
return this;
}
/**
* required string blockPool = 1;
*/
public Builder clearBlockPool() {
bitField0_ = (bitField0_ & ~0x00000001);
blockPool_ = getDefaultInstance().getBlockPool();
onChanged();
return this;
}
/**
* required string blockPool = 1;
*/
public Builder setBlockPoolBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
blockPool_ = value;
onChanged();
return this;
}
// required bool force = 2;
private boolean force_ ;
/**
* required bool force = 2;
*/
public boolean hasForce() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required bool force = 2;
*/
public boolean getForce() {
return force_;
}
/**
* required bool force = 2;
*/
public Builder setForce(boolean value) {
bitField0_ |= 0x00000002;
force_ = value;
onChanged();
return this;
}
/**
* required bool force = 2;
*/
public Builder clearForce() {
bitField0_ = (bitField0_ & ~0x00000002);
force_ = false;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DeleteBlockPoolRequestProto)
}
static {
defaultInstance = new DeleteBlockPoolRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DeleteBlockPoolRequestProto)
}
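// Editorial sketch, not part of the protoc output: round-tripping a
// DeleteBlockPoolRequestProto. Both fields are required, so build() throws an
// UninitializedMessageException if either setter is skipped; the pool id below
// is a made-up example.
//
//   DeleteBlockPoolRequestProto req = DeleteBlockPoolRequestProto.newBuilder()
//       .setBlockPool("BP-12345-127.0.0.1-1400000000000") // hypothetical id
//       .setForce(false) // only delete the pool if it is empty
//       .build();
//   byte[] wire = req.toByteArray();
//   DeleteBlockPoolRequestProto parsed = DeleteBlockPoolRequestProto.parseFrom(wire);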
public interface DeleteBlockPoolResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
}
/**
 * Protobuf type {@code hadoop.hdfs.DeleteBlockPoolResponseProto}
 *
 * <pre>
 **
 * void response
 * </pre>
 */
public static final class DeleteBlockPoolResponseProto extends
com.google.protobuf.GeneratedMessage
implements DeleteBlockPoolResponseProtoOrBuilder {
// Use DeleteBlockPoolResponseProto.newBuilder() to construct.
private DeleteBlockPoolResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private DeleteBlockPoolResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final DeleteBlockPoolResponseProto defaultInstance;
public static DeleteBlockPoolResponseProto getDefaultInstance() {
return defaultInstance;
}
public DeleteBlockPoolResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DeleteBlockPoolResponseProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.Builder.class);
}
public static com.google.protobuf.Parser<DeleteBlockPoolResponseProto> PARSER =
new com.google.protobuf.AbstractParser<DeleteBlockPoolResponseProto>() {
public DeleteBlockPoolResponseProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new DeleteBlockPoolResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<DeleteBlockPoolResponseProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Protobuf type {@code hadoop.hdfs.DeleteBlockPoolResponseProto}
 *
 * <pre>
 **
 * void response
 * </pre>
 */
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DeleteBlockPoolResponseProto)
}
static {
defaultInstance = new DeleteBlockPoolResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DeleteBlockPoolResponseProto)
}
public interface GetBlockLocalPathInfoRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.ExtendedBlockProto block = 1;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
boolean hasBlock();
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock();
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder();
// required .hadoop.common.TokenProto token = 2;
/**
* required .hadoop.common.TokenProto token = 2;
*/
boolean hasToken();
/**
* required .hadoop.common.TokenProto token = 2;
*/
org.apache.hadoop.security.proto.SecurityProtos.TokenProto getToken();
/**
* required .hadoop.common.TokenProto token = 2;
*/
org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getTokenOrBuilder();
}
/**
 * Protobuf type {@code hadoop.hdfs.GetBlockLocalPathInfoRequestProto}
 *
 * <pre>
 **
 * Gets the file information where block and its metadata is stored
 * block - block for which path information is being requested
 * token - block token
 *
 * This message is deprecated in favor of file descriptor passing.
 * </pre>
 */
public static final class GetBlockLocalPathInfoRequestProto extends
com.google.protobuf.GeneratedMessage
implements GetBlockLocalPathInfoRequestProtoOrBuilder {
// Use GetBlockLocalPathInfoRequestProto.newBuilder() to construct.
private GetBlockLocalPathInfoRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private GetBlockLocalPathInfoRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final GetBlockLocalPathInfoRequestProto defaultInstance;
public static GetBlockLocalPathInfoRequestProto getDefaultInstance() {
return defaultInstance;
}
public GetBlockLocalPathInfoRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetBlockLocalPathInfoRequestProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = block_.toBuilder();
}
block_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(block_);
block_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
subBuilder = token_.toBuilder();
}
token_ = input.readMessage(org.apache.hadoop.security.proto.SecurityProtos.TokenProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(token_);
token_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.Builder.class);
}
public static com.google.protobuf.Parser<GetBlockLocalPathInfoRequestProto> PARSER =
new com.google.protobuf.AbstractParser<GetBlockLocalPathInfoRequestProto>() {
public GetBlockLocalPathInfoRequestProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new GetBlockLocalPathInfoRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<GetBlockLocalPathInfoRequestProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.ExtendedBlockProto block = 1;
public static final int BLOCK_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
return block_;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
return block_;
}
// required .hadoop.common.TokenProto token = 2;
public static final int TOKEN_FIELD_NUMBER = 2;
private org.apache.hadoop.security.proto.SecurityProtos.TokenProto token_;
/**
* required .hadoop.common.TokenProto token = 2;
*/
public boolean hasToken() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getToken() {
return token_;
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getTokenOrBuilder() {
return token_;
}
private void initFields() {
block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
token_ = org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasBlock()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasToken()) {
memoizedIsInitialized = 0;
return false;
}
if (!getBlock().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
if (!getToken().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, block_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeMessage(2, token_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, block_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, token_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto) obj;
boolean result = true;
result = result && (hasBlock() == other.hasBlock());
if (hasBlock()) {
result = result && getBlock()
.equals(other.getBlock());
}
result = result && (hasToken() == other.hasToken());
if (hasToken()) {
result = result && getToken()
.equals(other.getToken());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasBlock()) {
hash = (37 * hash) + BLOCK_FIELD_NUMBER;
hash = (53 * hash) + getBlock().hashCode();
}
if (hasToken()) {
hash = (37 * hash) + TOKEN_FIELD_NUMBER;
hash = (53 * hash) + getToken().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
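// Editorial sketch, not part of the protoc output: both required sub-messages
// are set on the builder before build(); "someBlock" and "someToken" stand in
// for values obtained elsewhere (e.g. a located block and its block token).
//
//   GetBlockLocalPathInfoRequestProto req =
//       GetBlockLocalPathInfoRequestProto.newBuilder()
//           .setBlock(someBlock)  // hadoop.hdfs.ExtendedBlockProto
//           .setToken(someToken)  // hadoop.common.TokenProto
//           .build();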
/**
 * Protobuf type {@code hadoop.hdfs.GetBlockLocalPathInfoRequestProto}
 *
 * <pre>
 **
 * Gets the file information where block and its metadata is stored
 * block - block for which path information is being requested
 * token - block token
 *
 * This message is deprecated in favor of file descriptor passing.
 * </pre>
 */
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getBlockFieldBuilder();
getTokenFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (blockBuilder_ == null) {
block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
if (tokenBuilder_ == null) {
token_ = org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance();
} else {
tokenBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (blockBuilder_ == null) {
result.block_ = block_;
} else {
result.block_ = blockBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
if (tokenBuilder_ == null) {
result.token_ = token_;
} else {
result.token_ = tokenBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.getDefaultInstance()) return this;
if (other.hasBlock()) {
mergeBlock(other.getBlock());
}
if (other.hasToken()) {
mergeToken(other.getToken());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasBlock()) {
return false;
}
if (!hasToken()) {
return false;
}
if (!getBlock().isInitialized()) {
return false;
}
if (!getToken().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.ExtendedBlockProto block = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
if (blockBuilder_ == null) {
return block_;
} else {
return blockBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
if (blockBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
block_ = value;
onChanged();
} else {
blockBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder setBlock(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) {
if (blockBuilder_ == null) {
block_ = builderForValue.build();
onChanged();
} else {
blockBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
if (blockBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) {
block_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
} else {
block_ = value;
}
onChanged();
} else {
blockBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder clearBlock() {
if (blockBuilder_ == null) {
block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
onChanged();
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getBlockFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
if (blockBuilder_ != null) {
return blockBuilder_.getMessageOrBuilder();
} else {
return block_;
}
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>
getBlockFieldBuilder() {
if (blockBuilder_ == null) {
blockBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>(
block_,
getParentForChildren(),
isClean());
block_ = null;
}
return blockBuilder_;
}
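// Editorial note: getBlockFieldBuilder above creates the SingleFieldBuilder
// lazily. Until a nested builder is requested the field lives in block_ as a
// plain message; once the builder exists it owns the value and block_ is
// nulled out, which is why every accessor branches on blockBuilder_ == null.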
// required .hadoop.common.TokenProto token = 2;
private org.apache.hadoop.security.proto.SecurityProtos.TokenProto token_ = org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> tokenBuilder_;
/**
* required .hadoop.common.TokenProto token = 2;
*/
public boolean hasToken() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getToken() {
if (tokenBuilder_ == null) {
return token_;
} else {
return tokenBuilder_.getMessage();
}
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
public Builder setToken(org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
if (tokenBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
token_ = value;
onChanged();
} else {
tokenBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
public Builder setToken(
org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) {
if (tokenBuilder_ == null) {
token_ = builderForValue.build();
onChanged();
} else {
tokenBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
public Builder mergeToken(org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
if (tokenBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002) &&
token_ != org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance()) {
token_ =
org.apache.hadoop.security.proto.SecurityProtos.TokenProto.newBuilder(token_).mergeFrom(value).buildPartial();
} else {
token_ = value;
}
onChanged();
} else {
tokenBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
public Builder clearToken() {
if (tokenBuilder_ == null) {
token_ = org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance();
onChanged();
} else {
tokenBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder getTokenBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getTokenFieldBuilder().getBuilder();
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getTokenOrBuilder() {
if (tokenBuilder_ != null) {
return tokenBuilder_.getMessageOrBuilder();
} else {
return token_;
}
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>
getTokenFieldBuilder() {
if (tokenBuilder_ == null) {
tokenBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>(
token_,
getParentForChildren(),
isClean());
token_ = null;
}
return tokenBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetBlockLocalPathInfoRequestProto)
}
static {
defaultInstance = new GetBlockLocalPathInfoRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetBlockLocalPathInfoRequestProto)
}
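// Usage sketch (editorial comment, not generated output): constructing a
// GetBlockLocalPathInfoRequestProto. `block` and `token` are hypothetical
// ExtendedBlockProto and TokenProto instances obtained elsewhere.
//
//   ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request =
//       ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto
//           .newBuilder()
//           .setBlock(block)
//           .setToken(token)
//           .build();   // build() throws if a required field is missing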
public interface GetBlockLocalPathInfoResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.ExtendedBlockProto block = 1;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
boolean hasBlock();
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock();
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder();
// required string localPath = 2;
/**
* required string localPath = 2;
*/
boolean hasLocalPath();
/**
* required string localPath = 2;
*/
java.lang.String getLocalPath();
/**
* required string localPath = 2;
*/
com.google.protobuf.ByteString
getLocalPathBytes();
// required string localMetaPath = 3;
/**
* required string localMetaPath = 3;
*/
boolean hasLocalMetaPath();
/**
* required string localMetaPath = 3;
*/
java.lang.String getLocalMetaPath();
/**
* required string localMetaPath = 3;
*/
com.google.protobuf.ByteString
getLocalMetaPathBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.GetBlockLocalPathInfoResponseProto}
*
*
**
* block - block for which file path information is being returned
* localPath - file path where the block data is stored
* localMetaPath - file path where the block meta data is stored
*
* This message is deprecated in favor of file descriptor passing.
*
*/
public static final class GetBlockLocalPathInfoResponseProto extends
com.google.protobuf.GeneratedMessage
implements GetBlockLocalPathInfoResponseProtoOrBuilder {
// Use GetBlockLocalPathInfoResponseProto.newBuilder() to construct.
private GetBlockLocalPathInfoResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private GetBlockLocalPathInfoResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final GetBlockLocalPathInfoResponseProto defaultInstance;
public static GetBlockLocalPathInfoResponseProto getDefaultInstance() {
return defaultInstance;
}
public GetBlockLocalPathInfoResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetBlockLocalPathInfoResponseProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = block_.toBuilder();
}
block_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(block_);
block_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
bitField0_ |= 0x00000002;
localPath_ = input.readBytes();
break;
}
case 26: {
bitField0_ |= 0x00000004;
localMetaPath_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.Builder.class);
}
public static com.google.protobuf.Parser<GetBlockLocalPathInfoResponseProto> PARSER =
new com.google.protobuf.AbstractParser<GetBlockLocalPathInfoResponseProto>() {
public GetBlockLocalPathInfoResponseProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new GetBlockLocalPathInfoResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<GetBlockLocalPathInfoResponseProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.ExtendedBlockProto block = 1;
public static final int BLOCK_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
return block_;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
return block_;
}
// required string localPath = 2;
public static final int LOCALPATH_FIELD_NUMBER = 2;
private java.lang.Object localPath_;
/**
* required string localPath = 2;
*/
public boolean hasLocalPath() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string localPath = 2;
*/
public java.lang.String getLocalPath() {
java.lang.Object ref = localPath_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
localPath_ = s;
}
return s;
}
}
/**
* required string localPath = 2;
*/
public com.google.protobuf.ByteString
getLocalPathBytes() {
java.lang.Object ref = localPath_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
localPath_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
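// Editorial note: getLocalPath/getLocalPathBytes above use the standard
// generated-code caching scheme. The field holds either the ByteString read
// off the wire or the decoded String; each accessor converts on demand and
// caches the result, and the String form is only cached back when the bytes
// are valid UTF-8.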
// required string localMetaPath = 3;
public static final int LOCALMETAPATH_FIELD_NUMBER = 3;
private java.lang.Object localMetaPath_;
/**
* required string localMetaPath = 3;
*/
public boolean hasLocalMetaPath() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required string localMetaPath = 3;
*/
public java.lang.String getLocalMetaPath() {
java.lang.Object ref = localMetaPath_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
localMetaPath_ = s;
}
return s;
}
}
/**
* required string localMetaPath = 3;
*/
public com.google.protobuf.ByteString
getLocalMetaPathBytes() {
java.lang.Object ref = localMetaPath_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
localMetaPath_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
localPath_ = "";
localMetaPath_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasBlock()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasLocalPath()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasLocalMetaPath()) {
memoizedIsInitialized = 0;
return false;
}
if (!getBlock().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, block_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getLocalPathBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, getLocalMetaPathBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, block_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getLocalPathBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, getLocalMetaPathBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto) obj;
boolean result = true;
result = result && (hasBlock() == other.hasBlock());
if (hasBlock()) {
result = result && getBlock()
.equals(other.getBlock());
}
result = result && (hasLocalPath() == other.hasLocalPath());
if (hasLocalPath()) {
result = result && getLocalPath()
.equals(other.getLocalPath());
}
result = result && (hasLocalMetaPath() == other.hasLocalMetaPath());
if (hasLocalMetaPath()) {
result = result && getLocalMetaPath()
.equals(other.getLocalMetaPath());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasBlock()) {
hash = (37 * hash) + BLOCK_FIELD_NUMBER;
hash = (53 * hash) + getBlock().hashCode();
}
if (hasLocalPath()) {
hash = (37 * hash) + LOCALPATH_FIELD_NUMBER;
hash = (53 * hash) + getLocalPath().hashCode();
}
if (hasLocalMetaPath()) {
hash = (37 * hash) + LOCALMETAPATH_FIELD_NUMBER;
hash = (53 * hash) + getLocalMetaPath().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.GetBlockLocalPathInfoResponseProto}
*
*
**
* block - block for which file path information is being returned
* localPath - file path where the block data is stored
* localMetaPath - file path where the block meta data is stored
*
* This message is deprecated in favor of file descriptor passing.
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getBlockFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (blockBuilder_ == null) {
block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
localPath_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
localMetaPath_ = "";
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (blockBuilder_ == null) {
result.block_ = block_;
} else {
result.block_ = blockBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.localPath_ = localPath_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.localMetaPath_ = localMetaPath_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance()) return this;
if (other.hasBlock()) {
mergeBlock(other.getBlock());
}
if (other.hasLocalPath()) {
bitField0_ |= 0x00000002;
localPath_ = other.localPath_;
onChanged();
}
if (other.hasLocalMetaPath()) {
bitField0_ |= 0x00000004;
localMetaPath_ = other.localMetaPath_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasBlock()) {
return false;
}
if (!hasLocalPath()) {
return false;
}
if (!hasLocalMetaPath()) {
return false;
}
if (!getBlock().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.ExtendedBlockProto block = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
if (blockBuilder_ == null) {
return block_;
} else {
return blockBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
if (blockBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
block_ = value;
onChanged();
} else {
blockBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder setBlock(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) {
if (blockBuilder_ == null) {
block_ = builderForValue.build();
onChanged();
} else {
blockBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
if (blockBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) {
block_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
} else {
block_ = value;
}
onChanged();
} else {
blockBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder clearBlock() {
if (blockBuilder_ == null) {
block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
onChanged();
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getBlockFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
if (blockBuilder_ != null) {
return blockBuilder_.getMessageOrBuilder();
} else {
return block_;
}
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>
getBlockFieldBuilder() {
if (blockBuilder_ == null) {
blockBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>(
block_,
getParentForChildren(),
isClean());
block_ = null;
}
return blockBuilder_;
}
// required string localPath = 2;
private java.lang.Object localPath_ = "";
/**
* required string localPath = 2;
*/
public boolean hasLocalPath() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string localPath = 2;
*/
public java.lang.String getLocalPath() {
java.lang.Object ref = localPath_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
localPath_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string localPath = 2;
*/
public com.google.protobuf.ByteString
getLocalPathBytes() {
java.lang.Object ref = localPath_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
localPath_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string localPath = 2;
*/
public Builder setLocalPath(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
localPath_ = value;
onChanged();
return this;
}
/**
* required string localPath = 2;
*/
public Builder clearLocalPath() {
bitField0_ = (bitField0_ & ~0x00000002);
localPath_ = getDefaultInstance().getLocalPath();
onChanged();
return this;
}
/**
* required string localPath = 2;
*/
public Builder setLocalPathBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
localPath_ = value;
onChanged();
return this;
}
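// Editorial note: setLocalPathBytes stores the ByteString as-is; no UTF-8
// validation is performed here, matching standard protobuf 2.x generated
// setters.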
// required string localMetaPath = 3;
private java.lang.Object localMetaPath_ = "";
/**
* required string localMetaPath = 3;
*/
public boolean hasLocalMetaPath() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required string localMetaPath = 3;
*/
public java.lang.String getLocalMetaPath() {
java.lang.Object ref = localMetaPath_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
localMetaPath_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string localMetaPath = 3;
*/
public com.google.protobuf.ByteString
getLocalMetaPathBytes() {
java.lang.Object ref = localMetaPath_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
localMetaPath_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string localMetaPath = 3;
*/
public Builder setLocalMetaPath(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
localMetaPath_ = value;
onChanged();
return this;
}
/**
* required string localMetaPath = 3;
*/
public Builder clearLocalMetaPath() {
bitField0_ = (bitField0_ & ~0x00000004);
localMetaPath_ = getDefaultInstance().getLocalMetaPath();
onChanged();
return this;
}
/**
* required string localMetaPath = 3;
*/
public Builder setLocalMetaPathBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
localMetaPath_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetBlockLocalPathInfoResponseProto)
}
static {
defaultInstance = new GetBlockLocalPathInfoResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetBlockLocalPathInfoResponseProto)
}
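// Usage sketch (editorial comment, not generated output): decoding a
// serialized response and reading the local paths. `bytes` is a hypothetical
// byte[] containing the wire-format message.
//
//   ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto resp =
//       ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto
//           .parseFrom(bytes);
//   String dataPath = resp.getLocalPath();
//   String metaPath = resp.getLocalMetaPath();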
public interface GetHdfsBlockLocationsRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// repeated .hadoop.common.TokenProto tokens = 2;
/**
* repeated .hadoop.common.TokenProto tokens = 2;
*
*
* Removed: HDFS-3969
* repeated ExtendedBlockProto blocks = 1;
*
*/
java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto>
getTokensList();
/**
* repeated .hadoop.common.TokenProto tokens = 2;
*
*
* Removed: HDFS-3969
* repeated ExtendedBlockProto blocks = 1;
*
*/
org.apache.hadoop.security.proto.SecurityProtos.TokenProto getTokens(int index);
/**
* repeated .hadoop.common.TokenProto tokens = 2;
*
*
* Removed: HDFS-3969
* repeated ExtendedBlockProto blocks = 1;
*
*/
int getTokensCount();
/**
* repeated .hadoop.common.TokenProto tokens = 2;
*
*
* Removed: HDFS-3969
* repeated ExtendedBlockProto blocks = 1;
*
*/
java.util.List<? extends org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>
getTokensOrBuilderList();
/**
* repeated .hadoop.common.TokenProto tokens = 2;
*
*
* Removed: HDFS-3969
* repeated ExtendedBlockProto blocks = 1;
*
*/
org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getTokensOrBuilder(
int index);
// required string blockPoolId = 3;
/**
* required string blockPoolId = 3;
*/
boolean hasBlockPoolId();
/**
* required string blockPoolId = 3;
*/
java.lang.String getBlockPoolId();
/**
* required string blockPoolId = 3;
*/
com.google.protobuf.ByteString
getBlockPoolIdBytes();
// repeated sfixed64 blockIds = 4 [packed = true];
/**
* repeated sfixed64 blockIds = 4 [packed = true];
*/
java.util.List<java.lang.Long> getBlockIdsList();
/**
* repeated sfixed64 blockIds = 4 [packed = true];
*/
int getBlockIdsCount();
/**
* repeated sfixed64 blockIds = 4 [packed = true];
*/
long getBlockIds(int index);
}
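// Usage sketch (editorial comment, not generated output): building a
// GetHdfsBlockLocationsRequestProto for two blocks. `poolId`, `id1`, `id2`,
// `token1`, and `token2` are hypothetical values; per the message comment,
// tokens must correspond index-for-index to the block IDs they authorize.
//
//   ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto req =
//       ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto
//           .newBuilder()
//           .setBlockPoolId(poolId)
//           .addBlockIds(id1).addBlockIds(id2)
//           .addTokens(token1).addTokens(token2)
//           .build();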
/**
* Protobuf type {@code hadoop.hdfs.GetHdfsBlockLocationsRequestProto}
*
*
**
* Query for the disk locations of a number of blocks on this DN.
* blockPoolId - the pool to query
* blockIds - list of block IDs to query
* tokens - list of access tokens corresponding to list of block IDs
*
*/
public static final class GetHdfsBlockLocationsRequestProto extends
com.google.protobuf.GeneratedMessage
implements GetHdfsBlockLocationsRequestProtoOrBuilder {
// Use GetHdfsBlockLocationsRequestProto.newBuilder() to construct.
private GetHdfsBlockLocationsRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private GetHdfsBlockLocationsRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final GetHdfsBlockLocationsRequestProto defaultInstance;
public static GetHdfsBlockLocationsRequestProto getDefaultInstance() {
return defaultInstance;
}
public GetHdfsBlockLocationsRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetHdfsBlockLocationsRequestProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 18: {
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
tokens_ = new java.util.ArrayList<org.apache.hadoop.security.proto.SecurityProtos.TokenProto>();
mutable_bitField0_ |= 0x00000001;
}
tokens_.add(input.readMessage(org.apache.hadoop.security.proto.SecurityProtos.TokenProto.PARSER, extensionRegistry));
break;
}
case 26: {
bitField0_ |= 0x00000001;
blockPoolId_ = input.readBytes();
break;
}
case 33: {
if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
blockIds_ = new java.util.ArrayList<java.lang.Long>();
mutable_bitField0_ |= 0x00000004;
}
blockIds_.add(input.readSFixed64());
break;
}
case 34: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
if (!((mutable_bitField0_ & 0x00000004) == 0x00000004) && input.getBytesUntilLimit() > 0) {
blockIds_ = new java.util.ArrayList<java.lang.Long>();
mutable_bitField0_ |= 0x00000004;
}
while (input.getBytesUntilLimit() > 0) {
blockIds_.add(input.readSFixed64());
}
input.popLimit(limit);
break;
}
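// Editorial note on the two cases above: field 4 may arrive unpacked
// (tag 33 = field 4, wire type 1: one fixed64 per tag) or packed
// (tag 34 = field 4, wire type 2: a length-delimited run of fixed64
// values). Parsers are expected to accept both encodings for packable
// repeated fields.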
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
tokens_ = java.util.Collections.unmodifiableList(tokens_);
}
if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
blockIds_ = java.util.Collections.unmodifiableList(blockIds_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetHdfsBlockLocationsRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetHdfsBlockLocationsRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto.Builder.class);
}
public static com.google.protobuf.Parser<GetHdfsBlockLocationsRequestProto> PARSER =
new com.google.protobuf.AbstractParser<GetHdfsBlockLocationsRequestProto>() {
public GetHdfsBlockLocationsRequestProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new GetHdfsBlockLocationsRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<GetHdfsBlockLocationsRequestProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// repeated .hadoop.common.TokenProto tokens = 2;
public static final int TOKENS_FIELD_NUMBER = 2;
private java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> tokens_;
/**
* repeated .hadoop.common.TokenProto tokens = 2;
*
*
* Removed: HDFS-3969
* repeated ExtendedBlockProto blocks = 1;
*
*/
public java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> getTokensList() {
return tokens_;
}
/**
* repeated .hadoop.common.TokenProto tokens = 2;
*
*
* Removed: HDFS-3969
* repeated ExtendedBlockProto blocks = 1;
*
*/
public java.util.List<? extends org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>
getTokensOrBuilderList() {
return tokens_;
}
/**
* repeated .hadoop.common.TokenProto tokens = 2;
*
*
* Removed: HDFS-3969
* repeated ExtendedBlockProto blocks = 1;
*
*/
public int getTokensCount() {
return tokens_.size();
}
/**
* repeated .hadoop.common.TokenProto tokens = 2;
*
*
* Removed: HDFS-3969
* repeated ExtendedBlockProto blocks = 1;
*
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getTokens(int index) {
return tokens_.get(index);
}
/**
* repeated .hadoop.common.TokenProto tokens = 2;
*
*
* Removed: HDFS-3969
* repeated ExtendedBlockProto blocks = 1;
*
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getTokensOrBuilder(
int index) {
return tokens_.get(index);
}
// required string blockPoolId = 3;
public static final int BLOCKPOOLID_FIELD_NUMBER = 3;
private java.lang.Object blockPoolId_;
/**
* required string blockPoolId = 3;
*/
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string blockPoolId = 3;
*/
public java.lang.String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
blockPoolId_ = s;
}
return s;
}
}
/**
* required string blockPoolId = 3;
*/
public com.google.protobuf.ByteString
getBlockPoolIdBytes() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPoolId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// repeated sfixed64 blockIds = 4 [packed = true];
public static final int BLOCKIDS_FIELD_NUMBER = 4;
private java.util.List<java.lang.Long> blockIds_;
/**
* repeated sfixed64 blockIds = 4 [packed = true];
*/
public java.util.List<java.lang.Long>
getBlockIdsList() {
return blockIds_;
}
/**
* repeated sfixed64 blockIds = 4 [packed = true];
*/
public int getBlockIdsCount() {
return blockIds_.size();
}
/**
* repeated sfixed64 blockIds = 4 [packed = true];
*/
public long getBlockIds(int index) {
return blockIds_.get(index);
}
private int blockIdsMemoizedSerializedSize = -1;
private void initFields() {
tokens_ = java.util.Collections.emptyList();
blockPoolId_ = "";
blockIds_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasBlockPoolId()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getTokensCount(); i++) {
if (!getTokens(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < tokens_.size(); i++) {
output.writeMessage(2, tokens_.get(i));
}
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(3, getBlockPoolIdBytes());
}
if (getBlockIdsList().size() > 0) {
output.writeRawVarint32(34);
output.writeRawVarint32(blockIdsMemoizedSerializedSize);
}
for (int i = 0; i < blockIds_.size(); i++) {
output.writeSFixed64NoTag(blockIds_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
for (int i = 0; i < tokens_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, tokens_.get(i));
}
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, getBlockPoolIdBytes());
}
{
int dataSize = 0;
dataSize = 8 * getBlockIdsList().size();
size += dataSize;
if (!getBlockIdsList().isEmpty()) {
size += 1;
size += com.google.protobuf.CodedOutputStream
.computeInt32SizeNoTag(dataSize);
}
blockIdsMemoizedSerializedSize = dataSize;
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
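// Editorial note: for the packed blockIds field the payload is exactly
// 8 bytes per sfixed64 value. The extra bytes counted above are the one-byte
// tag (34) plus the varint length prefix; the payload size is memoized in
// blockIdsMemoizedSerializedSize so writeTo can emit the length prefix
// without recomputing it.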
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto) obj;
boolean result = true;
result = result && getTokensList()
.equals(other.getTokensList());
result = result && (hasBlockPoolId() == other.hasBlockPoolId());
if (hasBlockPoolId()) {
result = result && getBlockPoolId()
.equals(other.getBlockPoolId());
}
result = result && getBlockIdsList()
.equals(other.getBlockIdsList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (getTokensCount() > 0) {
hash = (37 * hash) + TOKENS_FIELD_NUMBER;
hash = (53 * hash) + getTokensList().hashCode();
}
if (hasBlockPoolId()) {
hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
hash = (53 * hash) + getBlockPoolId().hashCode();
}
if (getBlockIdsCount() > 0) {
hash = (37 * hash) + BLOCKIDS_FIELD_NUMBER;
hash = (53 * hash) + getBlockIdsList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.GetHdfsBlockLocationsRequestProto}
*
*
**
* Query for the disk locations of a number of blocks on this DN.
* blockPoolId - the pool to query
* blockIds - list of block IDs to query
* tokens - list of access tokens corresponding to list of block IDs
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetHdfsBlockLocationsRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetHdfsBlockLocationsRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getTokensFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (tokensBuilder_ == null) {
tokens_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
} else {
tokensBuilder_.clear();
}
blockPoolId_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
blockIds_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetHdfsBlockLocationsRequestProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (tokensBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001)) {
tokens_ = java.util.Collections.unmodifiableList(tokens_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.tokens_ = tokens_;
} else {
result.tokens_ = tokensBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000001;
}
result.blockPoolId_ = blockPoolId_;
if (((bitField0_ & 0x00000004) == 0x00000004)) {
blockIds_ = java.util.Collections.unmodifiableList(blockIds_);
bitField0_ = (bitField0_ & ~0x00000004);
}
result.blockIds_ = blockIds_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto.getDefaultInstance()) return this;
if (tokensBuilder_ == null) {
if (!other.tokens_.isEmpty()) {
if (tokens_.isEmpty()) {
tokens_ = other.tokens_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureTokensIsMutable();
tokens_.addAll(other.tokens_);
}
onChanged();
}
} else {
if (!other.tokens_.isEmpty()) {
if (tokensBuilder_.isEmpty()) {
tokensBuilder_.dispose();
tokensBuilder_ = null;
tokens_ = other.tokens_;
bitField0_ = (bitField0_ & ~0x00000001);
tokensBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getTokensFieldBuilder() : null;
} else {
tokensBuilder_.addAllMessages(other.tokens_);
}
}
}
if (other.hasBlockPoolId()) {
bitField0_ |= 0x00000002;
blockPoolId_ = other.blockPoolId_;
onChanged();
}
if (!other.blockIds_.isEmpty()) {
if (blockIds_.isEmpty()) {
blockIds_ = other.blockIds_;
bitField0_ = (bitField0_ & ~0x00000004);
} else {
ensureBlockIdsIsMutable();
blockIds_.addAll(other.blockIds_);
}
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasBlockPoolId()) {
return false;
}
for (int i = 0; i < getTokensCount(); i++) {
if (!getTokens(i).isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// repeated .hadoop.common.TokenProto tokens = 2;
private java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> tokens_ =
java.util.Collections.emptyList();
private void ensureTokensIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
tokens_ = new java.util.ArrayList<org.apache.hadoop.security.proto.SecurityProtos.TokenProto>(tokens_);
bitField0_ |= 0x00000001;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> tokensBuilder_;
/**
* repeated .hadoop.common.TokenProto tokens = 2;
*
*
* Removed: HDFS-3969
* repeated ExtendedBlockProto blocks = 1;
*
*/
public java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> getTokensList() {
if (tokensBuilder_ == null) {
return java.util.Collections.unmodifiableList(tokens_);
} else {
return tokensBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.common.TokenProto tokens = 2;
*
*
* Removed: HDFS-3969
* repeated ExtendedBlockProto blocks = 1;
*
*/
public int getTokensCount() {
if (tokensBuilder_ == null) {
return tokens_.size();
} else {
return tokensBuilder_.getCount();
}
}
/**
* repeated .hadoop.common.TokenProto tokens = 2;
*
*
* Removed: HDFS-3969
* repeated ExtendedBlockProto blocks = 1;
*
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getTokens(int index) {
if (tokensBuilder_ == null) {
return tokens_.get(index);
} else {
return tokensBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.common.TokenProto tokens = 2;
*
*
* Removed: HDFS-3969
* repeated ExtendedBlockProto blocks = 1;
*
*/
public Builder setTokens(
int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
if (tokensBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTokensIsMutable();
tokens_.set(index, value);
onChanged();
} else {
tokensBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.common.TokenProto tokens = 2;
*
*
* Removed: HDFS-3969
* repeated ExtendedBlockProto blocks = 1;
*
*/
public Builder setTokens(
int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) {
if (tokensBuilder_ == null) {
ensureTokensIsMutable();
tokens_.set(index, builderForValue.build());
onChanged();
} else {
tokensBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.common.TokenProto tokens = 2;
*
*
* Removed: HDFS-3969
* repeated ExtendedBlockProto blocks = 1;
*
*/
public Builder addTokens(org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
if (tokensBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTokensIsMutable();
tokens_.add(value);
onChanged();
} else {
tokensBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.common.TokenProto tokens = 2;
*
*
* Removed: HDFS-3969
* repeated ExtendedBlockProto blocks = 1;
*
*/
public Builder addTokens(
int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
if (tokensBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTokensIsMutable();
tokens_.add(index, value);
onChanged();
} else {
tokensBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.common.TokenProto tokens = 2;
*
*
* Removed: HDFS-3969
* repeated ExtendedBlockProto blocks = 1;
*
*/
public Builder addTokens(
org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) {
if (tokensBuilder_ == null) {
ensureTokensIsMutable();
tokens_.add(builderForValue.build());
onChanged();
} else {
tokensBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.common.TokenProto tokens = 2;
*
*
* Removed: HDFS-3969
* repeated ExtendedBlockProto blocks = 1;
*
*/
public Builder addTokens(
int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) {
if (tokensBuilder_ == null) {
ensureTokensIsMutable();
tokens_.add(index, builderForValue.build());
onChanged();
} else {
tokensBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.common.TokenProto tokens = 2;
*
*
* Removed: HDFS-3969
* repeated ExtendedBlockProto blocks = 1;
*
*/
public Builder addAllTokens(
java.lang.Iterable<? extends org.apache.hadoop.security.proto.SecurityProtos.TokenProto> values) {
if (tokensBuilder_ == null) {
ensureTokensIsMutable();
super.addAll(values, tokens_);
onChanged();
} else {
tokensBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.common.TokenProto tokens = 2;
*
*
* Removed: HDFS-3969
* repeated ExtendedBlockProto blocks = 1;
*
*/
public Builder clearTokens() {
if (tokensBuilder_ == null) {
tokens_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
} else {
tokensBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.common.TokenProto tokens = 2;
*
*
* Removed: HDFS-3969
* repeated ExtendedBlockProto blocks = 1;
*
*/
public Builder removeTokens(int index) {
if (tokensBuilder_ == null) {
ensureTokensIsMutable();
tokens_.remove(index);
onChanged();
} else {
tokensBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.common.TokenProto tokens = 2;
*
*
* Removed: HDFS-3969
* repeated ExtendedBlockProto blocks = 1;
*
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder getTokensBuilder(
int index) {
return getTokensFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.common.TokenProto tokens = 2;
*
*
* Removed: HDFS-3969
* repeated ExtendedBlockProto blocks = 1;
*
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getTokensOrBuilder(
int index) {
if (tokensBuilder_ == null) {
return tokens_.get(index); } else {
return tokensBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.common.TokenProto tokens = 2;
*
*
* Removed: HDFS-3969
* repeated ExtendedBlockProto blocks = 1;
*
*/
public java.util.List<? extends org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>
getTokensOrBuilderList() {
if (tokensBuilder_ != null) {
return tokensBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(tokens_);
}
}
/**
* repeated .hadoop.common.TokenProto tokens = 2;
*
*
* Removed: HDFS-3969
* repeated ExtendedBlockProto blocks = 1;
*
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder addTokensBuilder() {
return getTokensFieldBuilder().addBuilder(
org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance());
}
/**
* repeated .hadoop.common.TokenProto tokens = 2;
*
*
* Removed: HDFS-3969
* repeated ExtendedBlockProto blocks = 1;
*
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder addTokensBuilder(
int index) {
return getTokensFieldBuilder().addBuilder(
index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance());
}
/**
* repeated .hadoop.common.TokenProto tokens = 2;
*
*
* Removed: HDFS-3969
* repeated ExtendedBlockProto blocks = 1;
*
*/
public java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder>
getTokensBuilderList() {
return getTokensFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>
getTokensFieldBuilder() {
if (tokensBuilder_ == null) {
tokensBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>(
tokens_,
((bitField0_ & 0x00000001) == 0x00000001),
getParentForChildren(),
isClean());
tokens_ = null;
}
return tokensBuilder_;
}
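// Editorial note (not protoc output): the token accessors above operate in
// one of two modes. Until getTokensFieldBuilder() is first called, tokens_
// is a plain list guarded by bit 0x00000001 of bitField0_; once the
// RepeatedFieldBuilder is created it takes ownership and tokens_ is nulled,
// which is why every accessor branches on tokensBuilder_ == null.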
// required string blockPoolId = 3;
private java.lang.Object blockPoolId_ = "";
/**
* required string blockPoolId = 3;
*/
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string blockPoolId = 3;
*/
public java.lang.String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
blockPoolId_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string blockPoolId = 3;
*/
public com.google.protobuf.ByteString
getBlockPoolIdBytes() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPoolId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string blockPoolId = 3;
*/
public Builder setBlockPoolId(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
blockPoolId_ = value;
onChanged();
return this;
}
/**
* required string blockPoolId = 3;
*/
public Builder clearBlockPoolId() {
bitField0_ = (bitField0_ & ~0x00000002);
blockPoolId_ = getDefaultInstance().getBlockPoolId();
onChanged();
return this;
}
/**
* required string blockPoolId = 3;
*/
public Builder setBlockPoolIdBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
blockPoolId_ = value;
onChanged();
return this;
}
// repeated sfixed64 blockIds = 4 [packed = true];
private java.util.List<java.lang.Long> blockIds_ = java.util.Collections.emptyList();
private void ensureBlockIdsIsMutable() {
if (!((bitField0_ & 0x00000004) == 0x00000004)) {
blockIds_ = new java.util.ArrayList<java.lang.Long>(blockIds_);
bitField0_ |= 0x00000004;
}
}
/**
* repeated sfixed64 blockIds = 4 [packed = true];
*/
public java.util.List<java.lang.Long>
getBlockIdsList() {
return java.util.Collections.unmodifiableList(blockIds_);
}
/**
* repeated sfixed64 blockIds = 4 [packed = true];
*/
public int getBlockIdsCount() {
return blockIds_.size();
}
/**
* repeated sfixed64 blockIds = 4 [packed = true];
*/
public long getBlockIds(int index) {
return blockIds_.get(index);
}
/**
* repeated sfixed64 blockIds = 4 [packed = true];
*/
public Builder setBlockIds(
int index, long value) {
ensureBlockIdsIsMutable();
blockIds_.set(index, value);
onChanged();
return this;
}
/**
* repeated sfixed64 blockIds = 4 [packed = true];
*/
public Builder addBlockIds(long value) {
ensureBlockIdsIsMutable();
blockIds_.add(value);
onChanged();
return this;
}
/**
* repeated sfixed64 blockIds = 4 [packed = true];
*/
public Builder addAllBlockIds(
java.lang.Iterable<? extends java.lang.Long> values) {
ensureBlockIdsIsMutable();
super.addAll(values, blockIds_);
onChanged();
return this;
}
/**
* repeated sfixed64 blockIds = 4 [packed = true];
*/
public Builder clearBlockIds() {
blockIds_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetHdfsBlockLocationsRequestProto)
}
static {
defaultInstance = new GetHdfsBlockLocationsRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetHdfsBlockLocationsRequestProto)
}
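// Editorial example (added for illustration; not part of the protoc output).
// A minimal sketch of assembling a GetHdfsBlockLocationsRequestProto:
// blockPoolId is the only required field, tokens and blockIds are repeated,
// and every literal value below is a hypothetical placeholder. TokenProto's
// identifier/password/kind/service fields are assumed required, as declared
// in Hadoop's Security.proto.
private static GetHdfsBlockLocationsRequestProto buildExampleLocationsRequest() {
  org.apache.hadoop.security.proto.SecurityProtos.TokenProto token =
      org.apache.hadoop.security.proto.SecurityProtos.TokenProto.newBuilder()
          .setIdentifier(com.google.protobuf.ByteString.EMPTY) // placeholder identifier
          .setPassword(com.google.protobuf.ByteString.EMPTY)   // placeholder password
          .setKind("HDFS_BLOCK_TOKEN")                         // hypothetical token kind
          .setService("127.0.0.1:50010")                       // hypothetical service address
          .build();
  return GetHdfsBlockLocationsRequestProto.newBuilder()
      .setBlockPoolId("BP-example")   // required; build() throws if left unset
      .addTokens(token)               // one access token per requested block
      .addBlockIds(1073741825L)       // hypothetical block id (sfixed64 on the wire)
      .build();
}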
public interface GetHdfsBlockLocationsResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// repeated bytes volumeIds = 1;
/**
* repeated bytes volumeIds = 1;
*/
java.util.List<com.google.protobuf.ByteString> getVolumeIdsList();
/**
* repeated bytes volumeIds = 1;
*/
int getVolumeIdsCount();
/**
* repeated bytes volumeIds = 1;
*/
com.google.protobuf.ByteString getVolumeIds(int index);
// repeated uint32 volumeIndexes = 2 [packed = true];
/**
* repeated uint32 volumeIndexes = 2 [packed = true];
*/
java.util.List<java.lang.Integer> getVolumeIndexesList();
/**
* repeated uint32 volumeIndexes = 2 [packed = true];
*/
int getVolumeIndexesCount();
/**
* repeated uint32 volumeIndexes = 2 [packed = true];
*/
int getVolumeIndexes(int index);
}
/**
* Protobuf type {@code hadoop.hdfs.GetHdfsBlockLocationsResponseProto}
*
* <pre>
* volumeIds - id of each volume, potentially multiple bytes
* volumeIndexes - for each block, an index into volumeIds specifying the volume
*                 on which it is located. If block is not present on any volume,
*                 index is set to MAX_INT.
* </pre>
*/
public static final class GetHdfsBlockLocationsResponseProto extends
com.google.protobuf.GeneratedMessage
implements GetHdfsBlockLocationsResponseProtoOrBuilder {
// Use GetHdfsBlockLocationsResponseProto.newBuilder() to construct.
private GetHdfsBlockLocationsResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private GetHdfsBlockLocationsResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final GetHdfsBlockLocationsResponseProto defaultInstance;
public static GetHdfsBlockLocationsResponseProto getDefaultInstance() {
return defaultInstance;
}
public GetHdfsBlockLocationsResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetHdfsBlockLocationsResponseProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
volumeIds_ = new java.util.ArrayList<com.google.protobuf.ByteString>();
mutable_bitField0_ |= 0x00000001;
}
volumeIds_.add(input.readBytes());
break;
}
case 16: {
if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
volumeIndexes_ = new java.util.ArrayList<java.lang.Integer>();
mutable_bitField0_ |= 0x00000002;
}
volumeIndexes_.add(input.readUInt32());
break;
}
case 18: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
if (!((mutable_bitField0_ & 0x00000002) == 0x00000002) && input.getBytesUntilLimit() > 0) {
volumeIndexes_ = new java.util.ArrayList<java.lang.Integer>();
mutable_bitField0_ |= 0x00000002;
}
while (input.getBytesUntilLimit() > 0) {
volumeIndexes_.add(input.readUInt32());
}
input.popLimit(limit);
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
volumeIds_ = java.util.Collections.unmodifiableList(volumeIds_);
}
if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
volumeIndexes_ = java.util.Collections.unmodifiableList(volumeIndexes_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
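// Editorial note (not protoc output): in the loop above, tag 16 ((2 << 3) | 0)
// accepts volumeIndexes values written one varint per tag, while tag 18
// ((2 << 3) | 2) accepts the packed form declared in the .proto, where a single
// length-delimited payload carries all the varints. For example, the list
// [3, 300] packs as the five bytes 0x12 0x03 0x03 0xAC 0x02
// (tag, payload length 3, varint 3, varint 300).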
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetHdfsBlockLocationsResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetHdfsBlockLocationsResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto.Builder.class);
}
public static com.google.protobuf.Parser<GetHdfsBlockLocationsResponseProto> PARSER =
new com.google.protobuf.AbstractParser<GetHdfsBlockLocationsResponseProto>() {
public GetHdfsBlockLocationsResponseProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new GetHdfsBlockLocationsResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<GetHdfsBlockLocationsResponseProto> getParserForType() {
return PARSER;
}
// repeated bytes volumeIds = 1;
public static final int VOLUMEIDS_FIELD_NUMBER = 1;
private java.util.List<com.google.protobuf.ByteString> volumeIds_;
/**
* repeated bytes volumeIds = 1;
*/
public java.util.List<com.google.protobuf.ByteString>
getVolumeIdsList() {
return volumeIds_;
}
/**
* repeated bytes volumeIds = 1;
*/
public int getVolumeIdsCount() {
return volumeIds_.size();
}
/**
* repeated bytes volumeIds = 1;
*/
public com.google.protobuf.ByteString getVolumeIds(int index) {
return volumeIds_.get(index);
}
// repeated uint32 volumeIndexes = 2 [packed = true];
public static final int VOLUMEINDEXES_FIELD_NUMBER = 2;
private java.util.List<java.lang.Integer> volumeIndexes_;
/**
* repeated uint32 volumeIndexes = 2 [packed = true];
*/
public java.util.List<java.lang.Integer>
getVolumeIndexesList() {
return volumeIndexes_;
}
/**
* repeated uint32 volumeIndexes = 2 [packed = true];
*/
public int getVolumeIndexesCount() {
return volumeIndexes_.size();
}
/**
* repeated uint32 volumeIndexes = 2 [packed = true];
*/
public int getVolumeIndexes(int index) {
return volumeIndexes_.get(index);
}
private int volumeIndexesMemoizedSerializedSize = -1;
private void initFields() {
volumeIds_ = java.util.Collections.emptyList();
volumeIndexes_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < volumeIds_.size(); i++) {
output.writeBytes(1, volumeIds_.get(i));
}
if (getVolumeIndexesList().size() > 0) {
output.writeRawVarint32(18);
output.writeRawVarint32(volumeIndexesMemoizedSerializedSize);
}
for (int i = 0; i < volumeIndexes_.size(); i++) {
output.writeUInt32NoTag(volumeIndexes_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
{
int dataSize = 0;
for (int i = 0; i < volumeIds_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeBytesSizeNoTag(volumeIds_.get(i));
}
size += dataSize;
size += 1 * getVolumeIdsList().size();
}
{
int dataSize = 0;
for (int i = 0; i < volumeIndexes_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeUInt32SizeNoTag(volumeIndexes_.get(i));
}
size += dataSize;
if (!getVolumeIndexesList().isEmpty()) {
size += 1;
size += com.google.protobuf.CodedOutputStream
.computeInt32SizeNoTag(dataSize);
}
volumeIndexesMemoizedSerializedSize = dataSize;
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto) obj;
boolean result = true;
result = result && getVolumeIdsList()
.equals(other.getVolumeIdsList());
result = result && getVolumeIndexesList()
.equals(other.getVolumeIndexesList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (getVolumeIdsCount() > 0) {
hash = (37 * hash) + VOLUMEIDS_FIELD_NUMBER;
hash = (53 * hash) + getVolumeIdsList().hashCode();
}
if (getVolumeIndexesCount() > 0) {
hash = (37 * hash) + VOLUMEINDEXES_FIELD_NUMBER;
hash = (53 * hash) + getVolumeIndexesList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.GetHdfsBlockLocationsResponseProto}
*
* <pre>
* volumeIds - id of each volume, potentially multiple bytes
* volumeIndexes - for each block, an index into volumeIds specifying the volume
*                 on which it is located. If block is not present on any volume,
*                 index is set to MAX_INT.
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetHdfsBlockLocationsResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetHdfsBlockLocationsResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
volumeIds_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
volumeIndexes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetHdfsBlockLocationsResponseProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto(this);
int from_bitField0_ = bitField0_;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
volumeIds_ = java.util.Collections.unmodifiableList(volumeIds_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.volumeIds_ = volumeIds_;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
volumeIndexes_ = java.util.Collections.unmodifiableList(volumeIndexes_);
bitField0_ = (bitField0_ & ~0x00000002);
}
result.volumeIndexes_ = volumeIndexes_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto.getDefaultInstance()) return this;
if (!other.volumeIds_.isEmpty()) {
if (volumeIds_.isEmpty()) {
volumeIds_ = other.volumeIds_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureVolumeIdsIsMutable();
volumeIds_.addAll(other.volumeIds_);
}
onChanged();
}
if (!other.volumeIndexes_.isEmpty()) {
if (volumeIndexes_.isEmpty()) {
volumeIndexes_ = other.volumeIndexes_;
bitField0_ = (bitField0_ & ~0x00000002);
} else {
ensureVolumeIndexesIsMutable();
volumeIndexes_.addAll(other.volumeIndexes_);
}
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// repeated bytes volumeIds = 1;
private java.util.List<com.google.protobuf.ByteString> volumeIds_ = java.util.Collections.emptyList();
private void ensureVolumeIdsIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
volumeIds_ = new java.util.ArrayList<com.google.protobuf.ByteString>(volumeIds_);
bitField0_ |= 0x00000001;
}
}
/**
* repeated bytes volumeIds = 1;
*/
public java.util.List<com.google.protobuf.ByteString>
getVolumeIdsList() {
return java.util.Collections.unmodifiableList(volumeIds_);
}
/**
* repeated bytes volumeIds = 1;
*/
public int getVolumeIdsCount() {
return volumeIds_.size();
}
/**
* repeated bytes volumeIds = 1;
*/
public com.google.protobuf.ByteString getVolumeIds(int index) {
return volumeIds_.get(index);
}
/**
* repeated bytes volumeIds = 1;
*/
public Builder setVolumeIds(
int index, com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
ensureVolumeIdsIsMutable();
volumeIds_.set(index, value);
onChanged();
return this;
}
/**
* repeated bytes volumeIds = 1;
*/
public Builder addVolumeIds(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
ensureVolumeIdsIsMutable();
volumeIds_.add(value);
onChanged();
return this;
}
/**
* repeated bytes volumeIds = 1;
*/
public Builder addAllVolumeIds(
java.lang.Iterable<? extends com.google.protobuf.ByteString> values) {
ensureVolumeIdsIsMutable();
super.addAll(values, volumeIds_);
onChanged();
return this;
}
/**
* repeated bytes volumeIds = 1;
*/
public Builder clearVolumeIds() {
volumeIds_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
return this;
}
// repeated uint32 volumeIndexes = 2 [packed = true];
private java.util.List<java.lang.Integer> volumeIndexes_ = java.util.Collections.emptyList();
private void ensureVolumeIndexesIsMutable() {
if (!((bitField0_ & 0x00000002) == 0x00000002)) {
volumeIndexes_ = new java.util.ArrayList<java.lang.Integer>(volumeIndexes_);
bitField0_ |= 0x00000002;
}
}
/**
* repeated uint32 volumeIndexes = 2 [packed = true];
*/
public java.util.List<java.lang.Integer>
getVolumeIndexesList() {
return java.util.Collections.unmodifiableList(volumeIndexes_);
}
/**
* repeated uint32 volumeIndexes = 2 [packed = true];
*/
public int getVolumeIndexesCount() {
return volumeIndexes_.size();
}
/**
* repeated uint32 volumeIndexes = 2 [packed = true];
*/
public int getVolumeIndexes(int index) {
return volumeIndexes_.get(index);
}
/**
* repeated uint32 volumeIndexes = 2 [packed = true];
*/
public Builder setVolumeIndexes(
int index, int value) {
ensureVolumeIndexesIsMutable();
volumeIndexes_.set(index, value);
onChanged();
return this;
}
/**
* repeated uint32 volumeIndexes = 2 [packed = true];
*/
public Builder addVolumeIndexes(int value) {
ensureVolumeIndexesIsMutable();
volumeIndexes_.add(value);
onChanged();
return this;
}
/**
* repeated uint32 volumeIndexes = 2 [packed = true];
*/
public Builder addAllVolumeIndexes(
java.lang.Iterable<? extends java.lang.Integer> values) {
ensureVolumeIndexesIsMutable();
super.addAll(values, volumeIndexes_);
onChanged();
return this;
}
/**
* repeated uint32 volumeIndexes = 2 [packed = true];
*/
public Builder clearVolumeIndexes() {
volumeIndexes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetHdfsBlockLocationsResponseProto)
}
static {
defaultInstance = new GetHdfsBlockLocationsResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetHdfsBlockLocationsResponseProto)
}
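// Editorial example (added for illustration; not part of the protoc output).
// A sketch of consuming a GetHdfsBlockLocationsResponseProto: volumeIndexes
// holds, for each requested block, an index into volumeIds, and
// Integer.MAX_VALUE marks a block found on no volume. Rendering the opaque
// volume-id bytes as UTF-8 here is purely for display.
private static void printExampleVolumePerBlock(
    GetHdfsBlockLocationsResponseProto response) {
  for (int i = 0; i < response.getVolumeIndexesCount(); i++) {
    int volumeIndex = response.getVolumeIndexes(i);
    if (volumeIndex == Integer.MAX_VALUE) {
      System.out.println("block " + i + ": not present on any volume");
    } else {
      com.google.protobuf.ByteString volumeId = response.getVolumeIds(volumeIndex);
      System.out.println("block " + i + ": volume " + volumeId.toStringUtf8());
    }
  }
}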
public interface ShutdownDatanodeRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required bool forUpgrade = 1;
/**
* required bool forUpgrade = 1;
*/
boolean hasForUpgrade();
/**
* required bool forUpgrade = 1;
*/
boolean getForUpgrade();
}
/**
* Protobuf type {@code hadoop.hdfs.ShutdownDatanodeRequestProto}
*
* <pre>
* forUpgrade - if true, clients are advised to wait for restart and quick
*              upgrade restart is instrumented. Otherwise, datanode does
*              the regular shutdown.
* </pre>
*/
public static final class ShutdownDatanodeRequestProto extends
com.google.protobuf.GeneratedMessage
implements ShutdownDatanodeRequestProtoOrBuilder {
// Use ShutdownDatanodeRequestProto.newBuilder() to construct.
private ShutdownDatanodeRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ShutdownDatanodeRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ShutdownDatanodeRequestProto defaultInstance;
public static ShutdownDatanodeRequestProto getDefaultInstance() {
return defaultInstance;
}
public ShutdownDatanodeRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ShutdownDatanodeRequestProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
forUpgrade_ = input.readBool();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto.Builder.class);
}
public static com.google.protobuf.Parser<ShutdownDatanodeRequestProto> PARSER =
new com.google.protobuf.AbstractParser<ShutdownDatanodeRequestProto>() {
public ShutdownDatanodeRequestProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ShutdownDatanodeRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<ShutdownDatanodeRequestProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required bool forUpgrade = 1;
public static final int FORUPGRADE_FIELD_NUMBER = 1;
private boolean forUpgrade_;
/**
* required bool forUpgrade = 1;
*/
public boolean hasForUpgrade() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required bool forUpgrade = 1;
*/
public boolean getForUpgrade() {
return forUpgrade_;
}
private void initFields() {
forUpgrade_ = false;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasForUpgrade()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBool(1, forUpgrade_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(1, forUpgrade_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto) obj;
boolean result = true;
result = result && (hasForUpgrade() == other.hasForUpgrade());
if (hasForUpgrade()) {
result = result && (getForUpgrade()
== other.getForUpgrade());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasForUpgrade()) {
hash = (37 * hash) + FORUPGRADE_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getForUpgrade());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ShutdownDatanodeRequestProto}
*
* <pre>
* forUpgrade - if true, clients are advised to wait for restart and quick
*              upgrade restart is instrumented. Otherwise, datanode does
*              the regular shutdown.
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
forUpgrade_ = false;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.forUpgrade_ = forUpgrade_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto.getDefaultInstance()) return this;
if (other.hasForUpgrade()) {
setForUpgrade(other.getForUpgrade());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasForUpgrade()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required bool forUpgrade = 1;
private boolean forUpgrade_ ;
/**
* required bool forUpgrade = 1;
*/
public boolean hasForUpgrade() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required bool forUpgrade = 1;
*/
public boolean getForUpgrade() {
return forUpgrade_;
}
/**
* required bool forUpgrade = 1;
*/
public Builder setForUpgrade(boolean value) {
bitField0_ |= 0x00000001;
forUpgrade_ = value;
onChanged();
return this;
}
/**
* required bool forUpgrade = 1;
*/
public Builder clearForUpgrade() {
bitField0_ = (bitField0_ & ~0x00000001);
forUpgrade_ = false;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ShutdownDatanodeRequestProto)
}
static {
defaultInstance = new ShutdownDatanodeRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ShutdownDatanodeRequestProto)
}
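// Editorial example (added for illustration; not part of the protoc output).
// A sketch of the required-field contract on ShutdownDatanodeRequestProto:
// build() succeeds only once forUpgrade is set, and the message survives a
// serialize/parse round trip through the generated parser.
private static ShutdownDatanodeRequestProto buildExampleShutdownRequest()
    throws com.google.protobuf.InvalidProtocolBufferException {
  ShutdownDatanodeRequestProto request = ShutdownDatanodeRequestProto.newBuilder()
      .setForUpgrade(true)  // ask for the restart-friendly shutdown path
      .build();             // would throw UninitializedMessageException if unset
  return ShutdownDatanodeRequestProto.parseFrom(request.toByteArray());
}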
public interface ShutdownDatanodeResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.hdfs.ShutdownDatanodeResponseProto}
*/
public static final class ShutdownDatanodeResponseProto extends
com.google.protobuf.GeneratedMessage
implements ShutdownDatanodeResponseProtoOrBuilder {
// Use ShutdownDatanodeResponseProto.newBuilder() to construct.
private ShutdownDatanodeResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ShutdownDatanodeResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ShutdownDatanodeResponseProto defaultInstance;
public static ShutdownDatanodeResponseProto getDefaultInstance() {
return defaultInstance;
}
public ShutdownDatanodeResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ShutdownDatanodeResponseProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.Builder.class);
}
public static com.google.protobuf.Parser<ShutdownDatanodeResponseProto> PARSER =
new com.google.protobuf.AbstractParser<ShutdownDatanodeResponseProto>() {
public ShutdownDatanodeResponseProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ShutdownDatanodeResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<ShutdownDatanodeResponseProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ShutdownDatanodeResponseProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ShutdownDatanodeResponseProto)
}
static {
defaultInstance = new ShutdownDatanodeResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ShutdownDatanodeResponseProto)
}
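// Illustrative usage sketch (caller-side, not part of the generated
// code): the shutdown response carries no fields, so a caller
// typically just builds the default instance and round-trips it.
//
//   ShutdownDatanodeResponseProto resp =
//       ShutdownDatanodeResponseProto.newBuilder().build();
//   byte[] wire = resp.toByteArray();
//   ShutdownDatanodeResponseProto parsed =
//       ShutdownDatanodeResponseProto.parseFrom(wire);
//   assert parsed.equals(resp);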
public interface GetDatanodeInfoRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.hdfs.GetDatanodeInfoRequestProto}
*
* <pre>
**
* Ping datanode for liveness and quick info
* </pre>
*/
public static final class GetDatanodeInfoRequestProto extends
com.google.protobuf.GeneratedMessage
implements GetDatanodeInfoRequestProtoOrBuilder {
// Use GetDatanodeInfoRequestProto.newBuilder() to construct.
private GetDatanodeInfoRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private GetDatanodeInfoRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final GetDatanodeInfoRequestProto defaultInstance;
public static GetDatanodeInfoRequestProto getDefaultInstance() {
return defaultInstance;
}
public GetDatanodeInfoRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetDatanodeInfoRequestProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto.Builder.class);
}
public static com.google.protobuf.Parser<GetDatanodeInfoRequestProto> PARSER =
new com.google.protobuf.AbstractParser<GetDatanodeInfoRequestProto>() {
public GetDatanodeInfoRequestProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new GetDatanodeInfoRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<GetDatanodeInfoRequestProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
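// Memoized initialization check: -1 = not yet computed,
// 0 = known uninitialized, 1 = known initialized.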
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.GetDatanodeInfoRequestProto}
*
* <pre>
**
* Ping datanode for liveness and quick info
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetDatanodeInfoRequestProto)
}
static {
defaultInstance = new GetDatanodeInfoRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetDatanodeInfoRequestProto)
}
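// Illustrative sketch (assumed caller code, not generated): the
// request message is empty, so serializing it produces zero bytes;
// the RPC framing alone identifies the getDatanodeInfo call.
//
//   GetDatanodeInfoRequestProto req =
//       GetDatanodeInfoRequestProto.getDefaultInstance();
//   assert req.getSerializedSize() == 0;
//   GetDatanodeInfoRequestProto parsed =
//       GetDatanodeInfoRequestProto.parseFrom(req.toByteArray());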
public interface GetDatanodeInfoResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
boolean hasLocalInfo();
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto getLocalInfo();
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProtoOrBuilder getLocalInfoOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.GetDatanodeInfoResponseProto}
*/
public static final class GetDatanodeInfoResponseProto extends
com.google.protobuf.GeneratedMessage
implements GetDatanodeInfoResponseProtoOrBuilder {
// Use GetDatanodeInfoResponseProto.newBuilder() to construct.
private GetDatanodeInfoResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private GetDatanodeInfoResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final GetDatanodeInfoResponseProto defaultInstance;
public static GetDatanodeInfoResponseProto getDefaultInstance() {
return defaultInstance;
}
public GetDatanodeInfoResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetDatanodeInfoResponseProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
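// Tag 10 = field number 1 (localInfo), wire type 2
// (length-delimited): (1 << 3) | 2.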
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = localInfo_.toBuilder();
}
localInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(localInfo_);
localInfo_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.Builder.class);
}
public static com.google.protobuf.Parser<GetDatanodeInfoResponseProto> PARSER =
new com.google.protobuf.AbstractParser<GetDatanodeInfoResponseProto>() {
public GetDatanodeInfoResponseProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new GetDatanodeInfoResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<GetDatanodeInfoResponseProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
public static final int LOCALINFO_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto localInfo_;
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public boolean hasLocalInfo() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto getLocalInfo() {
return localInfo_;
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProtoOrBuilder getLocalInfoOrBuilder() {
return localInfo_;
}
private void initFields() {
localInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasLocalInfo()) {
memoizedIsInitialized = 0;
return false;
}
if (!getLocalInfo().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, localInfo_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, localInfo_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto) obj;
boolean result = true;
result = result && (hasLocalInfo() == other.hasLocalInfo());
if (hasLocalInfo()) {
result = result && getLocalInfo()
.equals(other.getLocalInfo());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasLocalInfo()) {
hash = (37 * hash) + LOCALINFO_FIELD_NUMBER;
hash = (53 * hash) + getLocalInfo().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.GetDatanodeInfoResponseProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getLocalInfoFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (localInfoBuilder_ == null) {
localInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.getDefaultInstance();
} else {
localInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (localInfoBuilder_ == null) {
result.localInfo_ = localInfo_;
} else {
result.localInfo_ = localInfoBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.getDefaultInstance()) return this;
if (other.hasLocalInfo()) {
mergeLocalInfo(other.getLocalInfo());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasLocalInfo()) {
return false;
}
if (!getLocalInfo().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto localInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProtoOrBuilder> localInfoBuilder_;
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public boolean hasLocalInfo() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto getLocalInfo() {
if (localInfoBuilder_ == null) {
return localInfo_;
} else {
return localInfoBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public Builder setLocalInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto value) {
if (localInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
localInfo_ = value;
onChanged();
} else {
localInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public Builder setLocalInfo(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder builderForValue) {
if (localInfoBuilder_ == null) {
localInfo_ = builderForValue.build();
onChanged();
} else {
localInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public Builder mergeLocalInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto value) {
if (localInfoBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
localInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.getDefaultInstance()) {
localInfo_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.newBuilder(localInfo_).mergeFrom(value).buildPartial();
} else {
localInfo_ = value;
}
onChanged();
} else {
localInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public Builder clearLocalInfo() {
if (localInfoBuilder_ == null) {
localInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.getDefaultInstance();
onChanged();
} else {
localInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder getLocalInfoBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getLocalInfoFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProtoOrBuilder getLocalInfoOrBuilder() {
if (localInfoBuilder_ != null) {
return localInfoBuilder_.getMessageOrBuilder();
} else {
return localInfo_;
}
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProtoOrBuilder>
getLocalInfoFieldBuilder() {
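// Lazily construct the single-field builder on first use; once it
// exists it becomes the sole owner of the field value, so the plain
// localInfo_ reference is nulled after being handed over.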
if (localInfoBuilder_ == null) {
localInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProtoOrBuilder>(
localInfo_,
getParentForChildren(),
isClean());
localInfo_ = null;
}
return localInfoBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetDatanodeInfoResponseProto)
}
static {
defaultInstance = new GetDatanodeInfoResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetDatanodeInfoResponseProto)
}
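// Illustrative sketch (assumed caller code, not generated): the
// response's required localInfo field must be set before build(),
// otherwise build() throws an UninitializedMessageException. The
// DatanodeLocalInfoProto setters used here (setSoftwareVersion,
// setConfigVersion, setUptime) are assumed from hdfs.proto; treat
// the literal values as placeholders.
//
//   HdfsProtos.DatanodeLocalInfoProto.Builder info =
//       HdfsProtos.DatanodeLocalInfoProto.newBuilder()
//           .setSoftwareVersion("x.y.z")
//           .setConfigVersion("config-0")
//           .setUptime(0L);
//   GetDatanodeInfoResponseProto resp =
//       GetDatanodeInfoResponseProto.newBuilder()
//           .setLocalInfo(info)   // builder overload defined above
//           .build();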
public interface StartReconfigurationRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.hdfs.StartReconfigurationRequestProto}
*
* <pre>
** Asks DataNode to reload configuration file.
* </pre>
*/
public static final class StartReconfigurationRequestProto extends
com.google.protobuf.GeneratedMessage
implements StartReconfigurationRequestProtoOrBuilder {
// Use StartReconfigurationRequestProto.newBuilder() to construct.
private StartReconfigurationRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private StartReconfigurationRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final StartReconfigurationRequestProto defaultInstance;
public static StartReconfigurationRequestProto getDefaultInstance() {
return defaultInstance;
}
public StartReconfigurationRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private StartReconfigurationRequestProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_StartReconfigurationRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_StartReconfigurationRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto.Builder.class);
}
public static com.google.protobuf.Parser<StartReconfigurationRequestProto> PARSER =
new com.google.protobuf.AbstractParser<StartReconfigurationRequestProto>() {
public StartReconfigurationRequestProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new StartReconfigurationRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<StartReconfigurationRequestProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.StartReconfigurationRequestProto}
*
* <pre>
** Asks DataNode to reload configuration file.
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_StartReconfigurationRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_StartReconfigurationRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_StartReconfigurationRequestProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.StartReconfigurationRequestProto)
}
static {
defaultInstance = new StartReconfigurationRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.StartReconfigurationRequestProto)
}
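// Illustrative sketch (assumed caller code, not generated): empty
// request/response pairs like this one still work with the
// delimited-stream helpers, which prefix each message with its
// varint length so several messages can share one stream.
//
//   java.io.ByteArrayOutputStream out =
//       new java.io.ByteArrayOutputStream();
//   StartReconfigurationRequestProto.getDefaultInstance()
//       .writeDelimitedTo(out);
//   StartReconfigurationRequestProto req =
//       StartReconfigurationRequestProto.parseDelimitedFrom(
//           new java.io.ByteArrayInputStream(out.toByteArray()));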
public interface StartReconfigurationResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.hdfs.StartReconfigurationResponseProto}
*/
public static final class StartReconfigurationResponseProto extends
com.google.protobuf.GeneratedMessage
implements StartReconfigurationResponseProtoOrBuilder {
// Use StartReconfigurationResponseProto.newBuilder() to construct.
private StartReconfigurationResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private StartReconfigurationResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final StartReconfigurationResponseProto defaultInstance;
public static StartReconfigurationResponseProto getDefaultInstance() {
return defaultInstance;
}
public StartReconfigurationResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private StartReconfigurationResponseProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_StartReconfigurationResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_StartReconfigurationResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto.Builder.class);
}
public static com.google.protobuf.Parser<StartReconfigurationResponseProto> PARSER =
new com.google.protobuf.AbstractParser<StartReconfigurationResponseProto>() {
public StartReconfigurationResponseProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new StartReconfigurationResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<StartReconfigurationResponseProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.StartReconfigurationResponseProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_StartReconfigurationResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_StartReconfigurationResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_StartReconfigurationResponseProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.StartReconfigurationResponseProto)
}
static {
defaultInstance = new StartReconfigurationResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.StartReconfigurationResponseProto)
}
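// Usage sketch (illustrative, not part of the generated code): the response
// message above has no fields, so callers typically just round-trip it over a
// stream. The `out` and `in` stream variables are hypothetical.
//
//   StartReconfigurationResponseProto resp =
//       StartReconfigurationResponseProto.getDefaultInstance();
//   resp.writeDelimitedTo(out);                                   // length-prefixed write
//   StartReconfigurationResponseProto echoed =
//       StartReconfigurationResponseProto.parseDelimitedFrom(in); // matching read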
public interface TriggerBlockReportRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required bool incremental = 1;
/**
* required bool incremental = 1;
*/
boolean hasIncremental();
/**
* required bool incremental = 1;
*/
boolean getIncremental();
}
/**
* Protobuf type {@code hadoop.hdfs.TriggerBlockReportRequestProto}
*/
public static final class TriggerBlockReportRequestProto extends
com.google.protobuf.GeneratedMessage
implements TriggerBlockReportRequestProtoOrBuilder {
// Use TriggerBlockReportRequestProto.newBuilder() to construct.
private TriggerBlockReportRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private TriggerBlockReportRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final TriggerBlockReportRequestProto defaultInstance;
public static TriggerBlockReportRequestProto getDefaultInstance() {
return defaultInstance;
}
public TriggerBlockReportRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private TriggerBlockReportRequestProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
incremental_ = input.readBool();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto.Builder.class);
}
public static com.google.protobuf.Parser<TriggerBlockReportRequestProto> PARSER =
new com.google.protobuf.AbstractParser<TriggerBlockReportRequestProto>() {
public TriggerBlockReportRequestProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new TriggerBlockReportRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<TriggerBlockReportRequestProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required bool incremental = 1;
public static final int INCREMENTAL_FIELD_NUMBER = 1;
private boolean incremental_;
/**
* required bool incremental = 1;
*/
public boolean hasIncremental() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required bool incremental = 1;
*/
public boolean getIncremental() {
return incremental_;
}
private void initFields() {
incremental_ = false;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasIncremental()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBool(1, incremental_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(1, incremental_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto) obj;
boolean result = true;
result = result && (hasIncremental() == other.hasIncremental());
if (hasIncremental()) {
result = result && (getIncremental()
== other.getIncremental());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasIncremental()) {
hash = (37 * hash) + INCREMENTAL_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getIncremental());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.TriggerBlockReportRequestProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
incremental_ = false;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.incremental_ = incremental_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto.getDefaultInstance()) return this;
if (other.hasIncremental()) {
setIncremental(other.getIncremental());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasIncremental()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required bool incremental = 1;
private boolean incremental_ ;
/**
* required bool incremental = 1;
*/
public boolean hasIncremental() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required bool incremental = 1;
*/
public boolean getIncremental() {
return incremental_;
}
/**
* required bool incremental = 1;
*/
public Builder setIncremental(boolean value) {
bitField0_ |= 0x00000001;
incremental_ = value;
onChanged();
return this;
}
/**
* required bool incremental = 1;
*/
public Builder clearIncremental() {
bitField0_ = (bitField0_ & ~0x00000001);
incremental_ = false;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.TriggerBlockReportRequestProto)
}
static {
defaultInstance = new TriggerBlockReportRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.TriggerBlockReportRequestProto)
}
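// Usage sketch (illustrative, not part of the generated code): building and
// round-tripping the request above. Because `incremental` is a required field,
// build() throws an UninitializedMessageException if it was never set, while
// buildPartial() returns the incomplete message without checking.
//
//   TriggerBlockReportRequestProto req =
//       TriggerBlockReportRequestProto.newBuilder()
//           .setIncremental(true)
//           .build();
//   byte[] wire = req.toByteArray();
//   TriggerBlockReportRequestProto parsed =
//       TriggerBlockReportRequestProto.parseFrom(wire);
//   assert parsed.getIncremental();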
public interface TriggerBlockReportResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.hdfs.TriggerBlockReportResponseProto}
*/
public static final class TriggerBlockReportResponseProto extends
com.google.protobuf.GeneratedMessage
implements TriggerBlockReportResponseProtoOrBuilder {
// Use TriggerBlockReportResponseProto.newBuilder() to construct.
private TriggerBlockReportResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private TriggerBlockReportResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final TriggerBlockReportResponseProto defaultInstance;
public static TriggerBlockReportResponseProto getDefaultInstance() {
return defaultInstance;
}
public TriggerBlockReportResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private TriggerBlockReportResponseProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.Builder.class);
}
public static com.google.protobuf.Parser<TriggerBlockReportResponseProto> PARSER =
new com.google.protobuf.AbstractParser<TriggerBlockReportResponseProto>() {
public TriggerBlockReportResponseProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new TriggerBlockReportResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<TriggerBlockReportResponseProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.TriggerBlockReportResponseProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.TriggerBlockReportResponseProto)
}
static {
defaultInstance = new TriggerBlockReportResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.TriggerBlockReportResponseProto)
}
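// Note (illustrative, not part of the generated code): like the other empty
// response messages in this file, the default instance serializes to zero
// bytes; only unknown fields merged in from a peer would add to the size.
//
//   assert TriggerBlockReportResponseProto.getDefaultInstance()
//       .getSerializedSize() == 0;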
public interface GetReconfigurationStatusRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
}
/**
 * Protobuf type {@code hadoop.hdfs.GetReconfigurationStatusRequestProto}
 *
 * <pre>
 ** Query the running status of reconfiguration process
 * </pre>
 */
public static final class GetReconfigurationStatusRequestProto extends
com.google.protobuf.GeneratedMessage
implements GetReconfigurationStatusRequestProtoOrBuilder {
// Use GetReconfigurationStatusRequestProto.newBuilder() to construct.
private GetReconfigurationStatusRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private GetReconfigurationStatusRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final GetReconfigurationStatusRequestProto defaultInstance;
public static GetReconfigurationStatusRequestProto getDefaultInstance() {
return defaultInstance;
}
public GetReconfigurationStatusRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetReconfigurationStatusRequestProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReconfigurationStatusRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReconfigurationStatusRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto.Builder.class);
}
public static com.google.protobuf.Parser<GetReconfigurationStatusRequestProto> PARSER =
new com.google.protobuf.AbstractParser<GetReconfigurationStatusRequestProto>() {
public GetReconfigurationStatusRequestProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new GetReconfigurationStatusRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<GetReconfigurationStatusRequestProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Protobuf type {@code hadoop.hdfs.GetReconfigurationStatusRequestProto}
 *
 * <pre>
 ** Query the running status of reconfiguration process
 * </pre>
 */
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReconfigurationStatusRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReconfigurationStatusRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReconfigurationStatusRequestProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetReconfigurationStatusRequestProto)
}
static {
defaultInstance = new GetReconfigurationStatusRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetReconfigurationStatusRequestProto)
}
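// Usage sketch (illustrative, not part of the generated code): every parseFrom
// overload above also accepts an ExtensionRegistryLite. This protocol defines
// no extensions, so the empty registry suffices; `bytes` is a hypothetical
// byte[] payload.
//
//   GetReconfigurationStatusRequestProto req =
//       GetReconfigurationStatusRequestProto.parseFrom(
//           bytes, com.google.protobuf.ExtensionRegistryLite.getEmptyRegistry());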
public interface GetReconfigurationStatusConfigChangeProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required string name = 1;
/**
* required string name = 1;
*/
boolean hasName();
/**
* required string name = 1;
*/
java.lang.String getName();
/**
* required string name = 1;
*/
com.google.protobuf.ByteString
getNameBytes();
// required string oldValue = 2;
/**
* required string oldValue = 2;
*/
boolean hasOldValue();
/**
* required string oldValue = 2;
*/
java.lang.String getOldValue();
/**
* required string oldValue = 2;
*/
com.google.protobuf.ByteString
getOldValueBytes();
// optional string newValue = 3;
/**
* optional string newValue = 3;
*/
boolean hasNewValue();
/**
* optional string newValue = 3;
*/
java.lang.String getNewValue();
/**
* optional string newValue = 3;
*/
com.google.protobuf.ByteString
getNewValueBytes();
// optional string errorMessage = 4;
/**
 * optional string errorMessage = 4;
 *
 * <pre>
 * It is empty on success.
 * </pre>
 */
boolean hasErrorMessage();
/**
 * optional string errorMessage = 4;
 *
 * <pre>
 * It is empty on success.
 * </pre>
 */
java.lang.String getErrorMessage();
/**
 * optional string errorMessage = 4;
 *
 * <pre>
 * It is empty on success.
 * </pre>
 */
com.google.protobuf.ByteString
getErrorMessageBytes();
}
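// Usage sketch (illustrative, not part of the generated code): `name` and
// `oldValue` are required and therefore always present on an initialized
// message, while `newValue` and `errorMessage` are optional and should be
// guarded with the has* accessors. `change` is a hypothetical instance.
//
//   System.out.println(change.getName() + ": " + change.getOldValue());
//   if (change.hasErrorMessage()) {        // empty on success
//     System.err.println(change.getErrorMessage());
//   }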
/**
* Protobuf type {@code hadoop.hdfs.GetReconfigurationStatusConfigChangeProto}
*/
public static final class GetReconfigurationStatusConfigChangeProto extends
com.google.protobuf.GeneratedMessage
implements GetReconfigurationStatusConfigChangeProtoOrBuilder {
// Use GetReconfigurationStatusConfigChangeProto.newBuilder() to construct.
private GetReconfigurationStatusConfigChangeProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private GetReconfigurationStatusConfigChangeProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final GetReconfigurationStatusConfigChangeProto defaultInstance;
public static GetReconfigurationStatusConfigChangeProto getDefaultInstance() {
return defaultInstance;
}
public GetReconfigurationStatusConfigChangeProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetReconfigurationStatusConfigChangeProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
name_ = input.readBytes();
break;
}
case 18: {
bitField0_ |= 0x00000002;
oldValue_ = input.readBytes();
break;
}
case 26: {
bitField0_ |= 0x00000004;
newValue_ = input.readBytes();
break;
}
case 34: {
bitField0_ |= 0x00000008;
errorMessage_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReconfigurationStatusConfigChangeProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReconfigurationStatusConfigChangeProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto.Builder.class);
}
public static com.google.protobuf.Parser<GetReconfigurationStatusConfigChangeProto> PARSER =
new com.google.protobuf.AbstractParser<GetReconfigurationStatusConfigChangeProto>() {
public GetReconfigurationStatusConfigChangeProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new GetReconfigurationStatusConfigChangeProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<GetReconfigurationStatusConfigChangeProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required string name = 1;
public static final int NAME_FIELD_NUMBER = 1;
private java.lang.Object name_;
/**
* required string name = 1;
*/
public boolean hasName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string name = 1;
*/
public java.lang.String getName() {
java.lang.Object ref = name_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
name_ = s;
}
return s;
}
}
/**
* required string name = 1;
*/
public com.google.protobuf.ByteString
getNameBytes() {
java.lang.Object ref = name_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
name_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required string oldValue = 2;
public static final int OLDVALUE_FIELD_NUMBER = 2;
private java.lang.Object oldValue_;
/**
* required string oldValue = 2;
*/
public boolean hasOldValue() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string oldValue = 2;
*/
public java.lang.String getOldValue() {
java.lang.Object ref = oldValue_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
oldValue_ = s;
}
return s;
}
}
/**
* required string oldValue = 2;
*/
public com.google.protobuf.ByteString
getOldValueBytes() {
java.lang.Object ref = oldValue_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
oldValue_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// optional string newValue = 3;
public static final int NEWVALUE_FIELD_NUMBER = 3;
private java.lang.Object newValue_;
/**
* optional string newValue = 3;
*/
public boolean hasNewValue() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional string newValue = 3;
*/
public java.lang.String getNewValue() {
java.lang.Object ref = newValue_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
newValue_ = s;
}
return s;
}
}
/**
* optional string newValue = 3;
*/
public com.google.protobuf.ByteString
getNewValueBytes() {
java.lang.Object ref = newValue_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
newValue_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// optional string errorMessage = 4;
public static final int ERRORMESSAGE_FIELD_NUMBER = 4;
private java.lang.Object errorMessage_;
/**
 * optional string errorMessage = 4;
 *
 * <pre>
 * It is empty on success.
 * </pre>
 */
public boolean hasErrorMessage() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
 * optional string errorMessage = 4;
 *
 * <pre>
 * It is empty on success.
 * </pre>
 */
public java.lang.String getErrorMessage() {
java.lang.Object ref = errorMessage_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
errorMessage_ = s;
}
return s;
}
}
/**
 * optional string errorMessage = 4;
 *
 * <pre>
 * It is empty on success.
 * </pre>
 */
public com.google.protobuf.ByteString
getErrorMessageBytes() {
java.lang.Object ref = errorMessage_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
errorMessage_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
name_ = "";
oldValue_ = "";
newValue_ = "";
errorMessage_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasName()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasOldValue()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getNameBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getOldValueBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, getNewValueBytes());
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeBytes(4, getErrorMessageBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getNameBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getOldValueBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, getNewValueBytes());
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(4, getErrorMessageBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto) obj;
boolean result = true;
result = result && (hasName() == other.hasName());
if (hasName()) {
result = result && getName()
.equals(other.getName());
}
result = result && (hasOldValue() == other.hasOldValue());
if (hasOldValue()) {
result = result && getOldValue()
.equals(other.getOldValue());
}
result = result && (hasNewValue() == other.hasNewValue());
if (hasNewValue()) {
result = result && getNewValue()
.equals(other.getNewValue());
}
result = result && (hasErrorMessage() == other.hasErrorMessage());
if (hasErrorMessage()) {
result = result && getErrorMessage()
.equals(other.getErrorMessage());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasName()) {
hash = (37 * hash) + NAME_FIELD_NUMBER;
hash = (53 * hash) + getName().hashCode();
}
if (hasOldValue()) {
hash = (37 * hash) + OLDVALUE_FIELD_NUMBER;
hash = (53 * hash) + getOldValue().hashCode();
}
if (hasNewValue()) {
hash = (37 * hash) + NEWVALUE_FIELD_NUMBER;
hash = (53 * hash) + getNewValue().hashCode();
}
if (hasErrorMessage()) {
hash = (37 * hash) + ERRORMESSAGE_FIELD_NUMBER;
hash = (53 * hash) + getErrorMessage().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
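// Illustrative note, not generated code: the parseFrom overloads above all delegate
// to PARSER and differ only in their input source. A minimal round trip, assuming
// `change` is an already-built instance of this message, might look like:
//
//   byte[] bytes = change.toByteArray();
//   GetReconfigurationStatusConfigChangeProto copy =
//       GetReconfigurationStatusConfigChangeProto.parseFrom(bytes);
//
// parseDelimitedFrom is the stream variant that first reads a varint length prefix,
// so it pairs with writeDelimitedTo rather than writeTo.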
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.GetReconfigurationStatusConfigChangeProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReconfigurationStatusConfigChangeProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReconfigurationStatusConfigChangeProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
name_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
oldValue_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
newValue_ = "";
bitField0_ = (bitField0_ & ~0x00000004);
errorMessage_ = "";
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReconfigurationStatusConfigChangeProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto build() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.name_ = name_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.oldValue_ = oldValue_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.newValue_ = newValue_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.errorMessage_ = errorMessage_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto.getDefaultInstance()) return this;
if (other.hasName()) {
bitField0_ |= 0x00000001;
name_ = other.name_;
onChanged();
}
if (other.hasOldValue()) {
bitField0_ |= 0x00000002;
oldValue_ = other.oldValue_;
onChanged();
}
if (other.hasNewValue()) {
bitField0_ |= 0x00000004;
newValue_ = other.newValue_;
onChanged();
}
if (other.hasErrorMessage()) {
bitField0_ |= 0x00000008;
errorMessage_ = other.errorMessage_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasName()) {
return false;
}
if (!hasOldValue()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string name = 1;
private java.lang.Object name_ = "";
/**
* required string name = 1;
*/
public boolean hasName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string name = 1;
*/
public java.lang.String getName() {
java.lang.Object ref = name_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
name_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string name = 1;
*/
public com.google.protobuf.ByteString
getNameBytes() {
java.lang.Object ref = name_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
name_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string name = 1;
*/
public Builder setName(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
name_ = value;
onChanged();
return this;
}
/**
* required string name = 1;
*/
public Builder clearName() {
bitField0_ = (bitField0_ & ~0x00000001);
name_ = getDefaultInstance().getName();
onChanged();
return this;
}
/**
* required string name = 1;
*/
public Builder setNameBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
name_ = value;
onChanged();
return this;
}
// required string oldValue = 2;
private java.lang.Object oldValue_ = "";
/**
* required string oldValue = 2;
*/
public boolean hasOldValue() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string oldValue = 2;
*/
public java.lang.String getOldValue() {
java.lang.Object ref = oldValue_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
oldValue_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string oldValue = 2;
*/
public com.google.protobuf.ByteString
getOldValueBytes() {
java.lang.Object ref = oldValue_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
oldValue_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string oldValue = 2;
*/
public Builder setOldValue(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
oldValue_ = value;
onChanged();
return this;
}
/**
* required string oldValue = 2;
*/
public Builder clearOldValue() {
bitField0_ = (bitField0_ & ~0x00000002);
oldValue_ = getDefaultInstance().getOldValue();
onChanged();
return this;
}
/**
* required string oldValue = 2;
*/
public Builder setOldValueBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
oldValue_ = value;
onChanged();
return this;
}
// optional string newValue = 3;
private java.lang.Object newValue_ = "";
/**
* optional string newValue = 3;
*/
public boolean hasNewValue() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional string newValue = 3;
*/
public java.lang.String getNewValue() {
java.lang.Object ref = newValue_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
newValue_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string newValue = 3;
*/
public com.google.protobuf.ByteString
getNewValueBytes() {
java.lang.Object ref = newValue_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
newValue_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* optional string newValue = 3;
*/
public Builder setNewValue(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
newValue_ = value;
onChanged();
return this;
}
/**
* optional string newValue = 3;
*/
public Builder clearNewValue() {
bitField0_ = (bitField0_ & ~0x00000004);
newValue_ = getDefaultInstance().getNewValue();
onChanged();
return this;
}
/**
* optional string newValue = 3;
*/
public Builder setNewValueBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
newValue_ = value;
onChanged();
return this;
}
// optional string errorMessage = 4;
private java.lang.Object errorMessage_ = "";
/**
* optional string errorMessage = 4;
*
*
* It is empty if the change succeeded.
*
*/
public boolean hasErrorMessage() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional string errorMessage = 4;
*
*
* It is empty if the change succeeded.
*
*/
public java.lang.String getErrorMessage() {
java.lang.Object ref = errorMessage_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
errorMessage_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string errorMessage = 4;
*
*
* It is empty if the change succeeded.
*
*/
public com.google.protobuf.ByteString
getErrorMessageBytes() {
java.lang.Object ref = errorMessage_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
errorMessage_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* optional string errorMessage = 4;
*
*
* It is empty if the change succeeded.
*
*/
public Builder setErrorMessage(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
errorMessage_ = value;
onChanged();
return this;
}
/**
* optional string errorMessage = 4;
*
*
* It is empty if the change succeeded.
*
*/
public Builder clearErrorMessage() {
bitField0_ = (bitField0_ & ~0x00000008);
errorMessage_ = getDefaultInstance().getErrorMessage();
onChanged();
return this;
}
/**
* optional string errorMessage = 4;
*
*
* It is empty if the change succeeded.
*
*/
public Builder setErrorMessageBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
errorMessage_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetReconfigurationStatusConfigChangeProto)
}
static {
defaultInstance = new GetReconfigurationStatusConfigChangeProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetReconfigurationStatusConfigChangeProto)
}
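// Illustrative note, not generated code: a sketch of constructing the message above
// through its Builder. name and oldValue are required, so build() throws
// com.google.protobuf.UninitializedMessageException if either is unset; newValue and
// errorMessage are optional. The property name and values below are made-up examples.
//
//   GetReconfigurationStatusConfigChangeProto change =
//       GetReconfigurationStatusConfigChangeProto.newBuilder()
//           .setName("dfs.datanode.data.dir")   // required
//           .setOldValue("/data/1")             // required
//           .setNewValue("/data/1,/data/2")     // optional
//           .build();                           // validates required fields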
public interface GetReconfigurationStatusResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required int64 startTime = 1;
/**
* required int64 startTime = 1;
*/
boolean hasStartTime();
/**
* required int64 startTime = 1;
*/
long getStartTime();
// optional int64 endTime = 2;
/**
* optional int64 endTime = 2;
*/
boolean hasEndTime();
/**
* optional int64 endTime = 2;
*/
long getEndTime();
// repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
/**
* repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto>
getChangesList();
/**
* repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto getChanges(int index);
/**
* repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
*/
int getChangesCount();
/**
* repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProtoOrBuilder>
getChangesOrBuilderList();
/**
* repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProtoOrBuilder getChangesOrBuilder(
int index);
}
/**
* Protobuf type {@code hadoop.hdfs.GetReconfigurationStatusResponseProto}
*/
public static final class GetReconfigurationStatusResponseProto extends
com.google.protobuf.GeneratedMessage
implements GetReconfigurationStatusResponseProtoOrBuilder {
// Use GetReconfigurationStatusResponseProto.newBuilder() to construct.
private GetReconfigurationStatusResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private GetReconfigurationStatusResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final GetReconfigurationStatusResponseProto defaultInstance;
public static GetReconfigurationStatusResponseProto getDefaultInstance() {
return defaultInstance;
}
public GetReconfigurationStatusResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetReconfigurationStatusResponseProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
startTime_ = input.readInt64();
break;
}
case 16: {
bitField0_ |= 0x00000002;
endTime_ = input.readInt64();
break;
}
case 26: {
if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
changes_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto>();
mutable_bitField0_ |= 0x00000004;
}
changes_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto.PARSER, extensionRegistry));
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
changes_ = java.util.Collections.unmodifiableList(changes_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReconfigurationStatusResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReconfigurationStatusResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto.Builder.class);
}
public static com.google.protobuf.Parser<GetReconfigurationStatusResponseProto> PARSER =
new com.google.protobuf.AbstractParser<GetReconfigurationStatusResponseProto>() {
public GetReconfigurationStatusResponseProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new GetReconfigurationStatusResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<GetReconfigurationStatusResponseProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required int64 startTime = 1;
public static final int STARTTIME_FIELD_NUMBER = 1;
private long startTime_;
/**
* required int64 startTime = 1;
*/
public boolean hasStartTime() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required int64 startTime = 1;
*/
public long getStartTime() {
return startTime_;
}
// optional int64 endTime = 2;
public static final int ENDTIME_FIELD_NUMBER = 2;
private long endTime_;
/**
* optional int64 endTime = 2;
*/
public boolean hasEndTime() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional int64 endTime = 2;
*/
public long getEndTime() {
return endTime_;
}
// repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
public static final int CHANGES_FIELD_NUMBER = 3;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto> changes_;
/**
* repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto> getChangesList() {
return changes_;
}
/**
* repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProtoOrBuilder>
getChangesOrBuilderList() {
return changes_;
}
/**
* repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
*/
public int getChangesCount() {
return changes_.size();
}
/**
* repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto getChanges(int index) {
return changes_.get(index);
}
/**
* repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProtoOrBuilder getChangesOrBuilder(
int index) {
return changes_.get(index);
}
private void initFields() {
startTime_ = 0L;
endTime_ = 0L;
changes_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasStartTime()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getChangesCount(); i++) {
if (!getChanges(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeInt64(1, startTime_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeInt64(2, endTime_);
}
for (int i = 0; i < changes_.size(); i++) {
output.writeMessage(3, changes_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(1, startTime_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(2, endTime_);
}
for (int i = 0; i < changes_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, changes_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto) obj;
boolean result = true;
result = result && (hasStartTime() == other.hasStartTime());
if (hasStartTime()) {
result = result && (getStartTime()
== other.getStartTime());
}
result = result && (hasEndTime() == other.hasEndTime());
if (hasEndTime()) {
result = result && (getEndTime()
== other.getEndTime());
}
result = result && getChangesList()
.equals(other.getChangesList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasStartTime()) {
hash = (37 * hash) + STARTTIME_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getStartTime());
}
if (hasEndTime()) {
hash = (37 * hash) + ENDTIME_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getEndTime());
}
if (getChangesCount() > 0) {
hash = (37 * hash) + CHANGES_FIELD_NUMBER;
hash = (53 * hash) + getChangesList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.GetReconfigurationStatusResponseProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReconfigurationStatusResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReconfigurationStatusResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getChangesFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
startTime_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
endTime_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
if (changesBuilder_ == null) {
changes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
} else {
changesBuilder_.clear();
}
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReconfigurationStatusResponseProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.startTime_ = startTime_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.endTime_ = endTime_;
if (changesBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004)) {
changes_ = java.util.Collections.unmodifiableList(changes_);
bitField0_ = (bitField0_ & ~0x00000004);
}
result.changes_ = changes_;
} else {
result.changes_ = changesBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto.getDefaultInstance()) return this;
if (other.hasStartTime()) {
setStartTime(other.getStartTime());
}
if (other.hasEndTime()) {
setEndTime(other.getEndTime());
}
if (changesBuilder_ == null) {
if (!other.changes_.isEmpty()) {
if (changes_.isEmpty()) {
changes_ = other.changes_;
bitField0_ = (bitField0_ & ~0x00000004);
} else {
ensureChangesIsMutable();
changes_.addAll(other.changes_);
}
onChanged();
}
} else {
if (!other.changes_.isEmpty()) {
if (changesBuilder_.isEmpty()) {
changesBuilder_.dispose();
changesBuilder_ = null;
changes_ = other.changes_;
bitField0_ = (bitField0_ & ~0x00000004);
changesBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getChangesFieldBuilder() : null;
} else {
changesBuilder_.addAllMessages(other.changes_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasStartTime()) {
return false;
}
for (int i = 0; i < getChangesCount(); i++) {
if (!getChanges(i).isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required int64 startTime = 1;
private long startTime_ ;
/**
* required int64 startTime = 1;
*/
public boolean hasStartTime() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required int64 startTime = 1;
*/
public long getStartTime() {
return startTime_;
}
/**
* required int64 startTime = 1;
*/
public Builder setStartTime(long value) {
bitField0_ |= 0x00000001;
startTime_ = value;
onChanged();
return this;
}
/**
* required int64 startTime = 1;
*/
public Builder clearStartTime() {
bitField0_ = (bitField0_ & ~0x00000001);
startTime_ = 0L;
onChanged();
return this;
}
// optional int64 endTime = 2;
private long endTime_ ;
/**
* optional int64 endTime = 2;
*/
public boolean hasEndTime() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional int64 endTime = 2;
*/
public long getEndTime() {
return endTime_;
}
/**
* optional int64 endTime = 2;
*/
public Builder setEndTime(long value) {
bitField0_ |= 0x00000002;
endTime_ = value;
onChanged();
return this;
}
/**
* optional int64 endTime = 2;
*/
public Builder clearEndTime() {
bitField0_ = (bitField0_ & ~0x00000002);
endTime_ = 0L;
onChanged();
return this;
}
// repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto> changes_ =
java.util.Collections.emptyList();
private void ensureChangesIsMutable() {
if (!((bitField0_ & 0x00000004) == 0x00000004)) {
changes_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto>(changes_);
bitField0_ |= 0x00000004;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProtoOrBuilder> changesBuilder_;
/**
* repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto> getChangesList() {
if (changesBuilder_ == null) {
return java.util.Collections.unmodifiableList(changes_);
} else {
return changesBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
*/
public int getChangesCount() {
if (changesBuilder_ == null) {
return changes_.size();
} else {
return changesBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto getChanges(int index) {
if (changesBuilder_ == null) {
return changes_.get(index);
} else {
return changesBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
*/
public Builder setChanges(
int index, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto value) {
if (changesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureChangesIsMutable();
changes_.set(index, value);
onChanged();
} else {
changesBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
*/
public Builder setChanges(
int index, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto.Builder builderForValue) {
if (changesBuilder_ == null) {
ensureChangesIsMutable();
changes_.set(index, builderForValue.build());
onChanged();
} else {
changesBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
*/
public Builder addChanges(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto value) {
if (changesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureChangesIsMutable();
changes_.add(value);
onChanged();
} else {
changesBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
*/
public Builder addChanges(
int index, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto value) {
if (changesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureChangesIsMutable();
changes_.add(index, value);
onChanged();
} else {
changesBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
*/
public Builder addChanges(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto.Builder builderForValue) {
if (changesBuilder_ == null) {
ensureChangesIsMutable();
changes_.add(builderForValue.build());
onChanged();
} else {
changesBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
*/
public Builder addChanges(
int index, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto.Builder builderForValue) {
if (changesBuilder_ == null) {
ensureChangesIsMutable();
changes_.add(index, builderForValue.build());
onChanged();
} else {
changesBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
*/
public Builder addAllChanges(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto> values) {
if (changesBuilder_ == null) {
ensureChangesIsMutable();
super.addAll(values, changes_);
onChanged();
} else {
changesBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
*/
public Builder clearChanges() {
if (changesBuilder_ == null) {
changes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
} else {
changesBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
*/
public Builder removeChanges(int index) {
if (changesBuilder_ == null) {
ensureChangesIsMutable();
changes_.remove(index);
onChanged();
} else {
changesBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto.Builder getChangesBuilder(
int index) {
return getChangesFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProtoOrBuilder getChangesOrBuilder(
int index) {
if (changesBuilder_ == null) {
return changes_.get(index); } else {
return changesBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProtoOrBuilder>
getChangesOrBuilderList() {
if (changesBuilder_ != null) {
return changesBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(changes_);
}
}
/**
* repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto.Builder addChangesBuilder() {
return getChangesFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto.Builder addChangesBuilder(
int index) {
return getChangesFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.GetReconfigurationStatusConfigChangeProto changes = 3;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto.Builder>
getChangesBuilderList() {
return getChangesFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProtoOrBuilder>
getChangesFieldBuilder() {
if (changesBuilder_ == null) {
changesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProtoOrBuilder>(
changes_,
((bitField0_ & 0x00000004) == 0x00000004),
getParentForChildren(),
isClean());
changes_ = null;
}
return changesBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetReconfigurationStatusResponseProto)
}
static {
defaultInstance = new GetReconfigurationStatusResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetReconfigurationStatusResponseProto)
}
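// Illustrative note, not generated code: a sketch of assembling the response above.
// startTime is required, endTime is optional, and changes is a repeated field whose
// entries are appended with addChanges (or edited in place via addChangesBuilder).
// `change` is assumed to be built as sketched earlier; the timestamp is arbitrary.
//
//   GetReconfigurationStatusResponseProto status =
//       GetReconfigurationStatusResponseProto.newBuilder()
//           .setStartTime(System.currentTimeMillis())
//           .addChanges(change)
//           .build();
//   for (GetReconfigurationStatusConfigChangeProto c : status.getChangesList()) {
//     System.out.println(c.getName() + ": " + c.getOldValue());
//   }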
/**
* Protobuf service {@code hadoop.hdfs.ClientDatanodeProtocolService}
*
*
**
* Protocol used from client to the Datanode.
* See the request and response for details of rpc call.
*
*/
public static abstract class ClientDatanodeProtocolService
implements com.google.protobuf.Service {
protected ClientDatanodeProtocolService() {}
public interface Interface {
/**
* rpc getReplicaVisibleLength(.hadoop.hdfs.GetReplicaVisibleLengthRequestProto) returns (.hadoop.hdfs.GetReplicaVisibleLengthResponseProto);
*
*
**
* Returns the visible length of the replica
*
*/
public abstract void getReplicaVisibleLength(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto> done);
/**
* rpc refreshNamenodes(.hadoop.hdfs.RefreshNamenodesRequestProto) returns (.hadoop.hdfs.RefreshNamenodesResponseProto);
*
*
**
* Refresh the list of federated namenodes from updated configuration.
* Adds new namenodes and stops the deleted namenodes.
*
*/
public abstract void refreshNamenodes(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto> done);
/**
* rpc deleteBlockPool(.hadoop.hdfs.DeleteBlockPoolRequestProto) returns (.hadoop.hdfs.DeleteBlockPoolResponseProto);
*
*
**
* Delete the block pool from the datanode.
*
*/
public abstract void deleteBlockPool(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto> done);
/**
* rpc getBlockLocalPathInfo(.hadoop.hdfs.GetBlockLocalPathInfoRequestProto) returns (.hadoop.hdfs.GetBlockLocalPathInfoResponseProto);
*
*
**
* Retrieves the path names of the block file and metadata file stored on the
* local file system.
*
*/
public abstract void getBlockLocalPathInfo(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto> done);
/**
* rpc getHdfsBlockLocations(.hadoop.hdfs.GetHdfsBlockLocationsRequestProto) returns (.hadoop.hdfs.GetHdfsBlockLocationsResponseProto);
*
*
**
* Retrieve additional HDFS-specific metadata about a set of blocks stored
* on the local file system.
*
*/
public abstract void getHdfsBlockLocations(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto> done);
/**
* rpc shutdownDatanode(.hadoop.hdfs.ShutdownDatanodeRequestProto) returns (.hadoop.hdfs.ShutdownDatanodeResponseProto);
*/
public abstract void shutdownDatanode(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto> done);
/**
* rpc getDatanodeInfo(.hadoop.hdfs.GetDatanodeInfoRequestProto) returns (.hadoop.hdfs.GetDatanodeInfoResponseProto);
*/
public abstract void getDatanodeInfo(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto> done);
/**
* rpc getReconfigurationStatus(.hadoop.hdfs.GetReconfigurationStatusRequestProto) returns (.hadoop.hdfs.GetReconfigurationStatusResponseProto);
*/
public abstract void getReconfigurationStatus(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto> done);
/**
* rpc startReconfiguration(.hadoop.hdfs.StartReconfigurationRequestProto) returns (.hadoop.hdfs.StartReconfigurationResponseProto);
*/
public abstract void startReconfiguration(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto> done);
/**
* rpc triggerBlockReport(.hadoop.hdfs.TriggerBlockReportRequestProto) returns (.hadoop.hdfs.TriggerBlockReportResponseProto);
*/
public abstract void triggerBlockReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto> done);
}
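// Illustrative note, not generated code: a server-side implementation of Interface
// reports each result through the supplied callback rather than a return value. A
// minimal sketch of one method, with the response contents left as placeholders:
//
//   public void getReconfigurationStatus(
//       com.google.protobuf.RpcController controller,
//       GetReconfigurationStatusRequestProto request,
//       com.google.protobuf.RpcCallback<GetReconfigurationStatusResponseProto> done) {
//     done.run(GetReconfigurationStatusResponseProto.newBuilder()
//         .setStartTime(0L)   // placeholder; a real datanode supplies actual state
//         .build());
//   }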
public static com.google.protobuf.Service newReflectiveService(
final Interface impl) {
return new ClientDatanodeProtocolService() {
@java.lang.Override
public void getReplicaVisibleLength(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto> done) {
impl.getReplicaVisibleLength(controller, request, done);
}
@java.lang.Override
public void refreshNamenodes(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto> done) {
impl.refreshNamenodes(controller, request, done);
}
@java.lang.Override
public void deleteBlockPool(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto> done) {
impl.deleteBlockPool(controller, request, done);
}
@java.lang.Override
public void getBlockLocalPathInfo(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto> done) {
impl.getBlockLocalPathInfo(controller, request, done);
}
@java.lang.Override
public void getHdfsBlockLocations(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto> done) {
impl.getHdfsBlockLocations(controller, request, done);
}
@java.lang.Override
public void shutdownDatanode(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto> done) {
impl.shutdownDatanode(controller, request, done);
}
@java.lang.Override
public void getDatanodeInfo(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto> done) {
impl.getDatanodeInfo(controller, request, done);
}
@java.lang.Override
public void getReconfigurationStatus(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto> done) {
impl.getReconfigurationStatus(controller, request, done);
}
@java.lang.Override
public void startReconfiguration(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto> done) {
impl.startReconfiguration(controller, request, done);
}
@java.lang.Override
public void triggerBlockReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto> done) {
impl.triggerBlockReport(controller, request, done);
}
};
}
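// Illustrative note, not generated code: newReflectiveService adapts an Interface
// implementation to the generic com.google.protobuf.Service so an RPC engine can
// dispatch calls by MethodDescriptor. Assuming `impl` implements Interface:
//
//   com.google.protobuf.Service service =
//       ClientDatanodeProtocolService.newReflectiveService(impl);
//   // service.callMethod(method, controller, request, callback) now routes to impl.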
public static com.google.protobuf.BlockingService
newReflectiveBlockingService(final BlockingInterface impl) {
return new com.google.protobuf.BlockingService() {
public final com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptorForType() {
return getDescriptor();
}
public final com.google.protobuf.Message callBlockingMethod(
com.google.protobuf.Descriptors.MethodDescriptor method,
com.google.protobuf.RpcController controller,
com.google.protobuf.Message request)
throws com.google.protobuf.ServiceException {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.callBlockingMethod() given method descriptor for " +
"wrong service type.");
}
switch(method.getIndex()) {
case 0:
return impl.getReplicaVisibleLength(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto)request);
case 1:
return impl.refreshNamenodes(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto)request);
case 2:
return impl.deleteBlockPool(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto)request);
case 3:
return impl.getBlockLocalPathInfo(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto)request);
case 4:
return impl.getHdfsBlockLocations(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto)request);
case 5:
return impl.shutdownDatanode(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto)request);
case 6:
return impl.getDatanodeInfo(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto)request);
case 7:
return impl.getReconfigurationStatus(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto)request);
case 8:
return impl.startReconfiguration(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto)request);
case 9:
return impl.triggerBlockReport(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto)request);
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getRequestPrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getRequestPrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.getDefaultInstance();
case 1:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.getDefaultInstance();
case 2:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.getDefaultInstance();
case 3:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.getDefaultInstance();
case 4:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto.getDefaultInstance();
case 5:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto.getDefaultInstance();
case 6:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto.getDefaultInstance();
case 7:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto.getDefaultInstance();
case 8:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto.getDefaultInstance();
case 9:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getResponsePrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getResponsePrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance();
case 1:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance();
case 2:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance();
case 3:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance();
case 4:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto.getDefaultInstance();
case 5:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.getDefaultInstance();
case 6:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.getDefaultInstance();
case 7:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto.getDefaultInstance();
case 8:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto.getDefaultInstance();
case 9:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
};
}
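// Editorial sketch (not part of the generated file): a server exports this
// protocol by implementing BlockingInterface and wrapping it with
// newReflectiveBlockingService(). The implementation class name below is a
// hypothetical placeholder; the transport that consumes the resulting
// BlockingService comes from the surrounding RPC framework, not this file.
//
//   com.google.protobuf.BlockingService service =
//       ClientDatanodeProtocolService.newReflectiveBlockingService(
//           new DatanodeProtocolServerSideImpl());  // implements BlockingInterface
//
// Note that the returned service dispatches on method.getIndex(), so callers
// must pass MethodDescriptors taken from this service's own descriptor; a
// descriptor from any other service fails the getService() check above.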
/**
* rpc getReplicaVisibleLength(.hadoop.hdfs.GetReplicaVisibleLengthRequestProto) returns (.hadoop.hdfs.GetReplicaVisibleLengthResponseProto);
*
*
**
* Returns the visible length of the replica
*
*/
public abstract void getReplicaVisibleLength(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto> done);
/**
* rpc refreshNamenodes(.hadoop.hdfs.RefreshNamenodesRequestProto) returns (.hadoop.hdfs.RefreshNamenodesResponseProto);
*
*
**
* Refresh the list of federated namenodes from updated configuration.
* Adds new namenodes and stops the deleted namenodes.
*
*/
public abstract void refreshNamenodes(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto> done);
/**
* rpc deleteBlockPool(.hadoop.hdfs.DeleteBlockPoolRequestProto) returns (.hadoop.hdfs.DeleteBlockPoolResponseProto);
*
*
**
* Delete the block pool from the datanode.
*
*/
public abstract void deleteBlockPool(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto> done);
/**
* rpc getBlockLocalPathInfo(.hadoop.hdfs.GetBlockLocalPathInfoRequestProto) returns (.hadoop.hdfs.GetBlockLocalPathInfoResponseProto);
*
*
**
* Retrieves the path names of the block file and metadata file stored on the
* local file system.
*
*/
public abstract void getBlockLocalPathInfo(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto> done);
/**
* rpc getHdfsBlockLocations(.hadoop.hdfs.GetHdfsBlockLocationsRequestProto) returns (.hadoop.hdfs.GetHdfsBlockLocationsResponseProto);
*
*
**
* Retrieve additional HDFS-specific metadata about a set of blocks stored
* on the local file system.
*
*/
public abstract void getHdfsBlockLocations(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto> done);
/**
* rpc shutdownDatanode(.hadoop.hdfs.ShutdownDatanodeRequestProto) returns (.hadoop.hdfs.ShutdownDatanodeResponseProto);
*/
public abstract void shutdownDatanode(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto> done);
/**
* rpc getDatanodeInfo(.hadoop.hdfs.GetDatanodeInfoRequestProto) returns (.hadoop.hdfs.GetDatanodeInfoResponseProto);
*/
public abstract void getDatanodeInfo(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto> done);
/**
* rpc getReconfigurationStatus(.hadoop.hdfs.GetReconfigurationStatusRequestProto) returns (.hadoop.hdfs.GetReconfigurationStatusResponseProto);
*/
public abstract void getReconfigurationStatus(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto> done);
/**
* rpc startReconfiguration(.hadoop.hdfs.StartReconfigurationRequestProto) returns (.hadoop.hdfs.StartReconfigurationResponseProto);
*/
public abstract void startReconfiguration(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto> done);
/**
* rpc triggerBlockReport(.hadoop.hdfs.TriggerBlockReportRequestProto) returns (.hadoop.hdfs.TriggerBlockReportResponseProto);
*/
public abstract void triggerBlockReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto> done);
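// Editorial sketch (not part of the generated file): the asynchronous API
// above is implemented by subclassing ClientDatanodeProtocolService and
// invoking done.run(...) exactly once per call. Only one of the ten RPCs is
// shown; the class name and the length lookup are hypothetical.
//
//   public class AsyncDatanodeServiceImpl extends ClientDatanodeProtocolService {
//     @Override
//     public void getReplicaVisibleLength(
//         com.google.protobuf.RpcController controller,
//         GetReplicaVisibleLengthRequestProto request,
//         com.google.protobuf.RpcCallback<GetReplicaVisibleLengthResponseProto> done) {
//       long visibleLength = lookUpVisibleLength(request.getBlock());  // hypothetical helper
//       done.run(GetReplicaVisibleLengthResponseProto.newBuilder()
//           .setLength(visibleLength)
//           .build());
//     }
//     // ... the remaining nine abstract methods follow the same pattern ...
//   }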
public static final
com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.getDescriptor().getServices().get(0);
}
public final com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptorForType() {
return getDescriptor();
}
public final void callMethod(
com.google.protobuf.Descriptors.MethodDescriptor method,
com.google.protobuf.RpcController controller,
com.google.protobuf.Message request,
com.google.protobuf.RpcCallback<
com.google.protobuf.Message> done) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.callMethod() given method descriptor for wrong " +
"service type.");
}
switch(method.getIndex()) {
case 0:
this.getReplicaVisibleLength(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 1:
this.refreshNamenodes(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 2:
this.deleteBlockPool(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 3:
this.getBlockLocalPathInfo(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 4:
this.getHdfsBlockLocations(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 5:
this.shutdownDatanode(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 6:
this.getDatanodeInfo(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 7:
this.getReconfigurationStatus(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 8:
this.startReconfiguration(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 9:
this.triggerBlockReport(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getRequestPrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getRequestPrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.getDefaultInstance();
case 1:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.getDefaultInstance();
case 2:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.getDefaultInstance();
case 3:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.getDefaultInstance();
case 4:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto.getDefaultInstance();
case 5:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto.getDefaultInstance();
case 6:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto.getDefaultInstance();
case 7:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto.getDefaultInstance();
case 8:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto.getDefaultInstance();
case 9:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getResponsePrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getResponsePrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance();
case 1:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance();
case 2:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance();
case 3:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance();
case 4:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto.getDefaultInstance();
case 5:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.getDefaultInstance();
case 6:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.getDefaultInstance();
case 7:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto.getDefaultInstance();
case 8:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto.getDefaultInstance();
case 9:
return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public static Stub newStub(
com.google.protobuf.RpcChannel channel) {
return new Stub(channel);
}
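// Editorial sketch (not part of the generated file): newStub() binds the
// callback-style client to a com.google.protobuf.RpcChannel. Protobuf defines
// only the channel interface; the concrete channel ("rpcChannel" below) must
// come from the RPC framework in use and is assumed here.
//
//   ClientDatanodeProtocolService.Interface client =
//       ClientDatanodeProtocolService.newStub(rpcChannel);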
public static final class Stub extends org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService implements Interface {
private Stub(com.google.protobuf.RpcChannel channel) {
this.channel = channel;
}
private final com.google.protobuf.RpcChannel channel;
public com.google.protobuf.RpcChannel getChannel() {
return channel;
}
public void getReplicaVisibleLength(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(0),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance()));
}
public void refreshNamenodes(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(1),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance()));
}
public void deleteBlockPool(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(2),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance()));
}
public void getBlockLocalPathInfo(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(3),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance()));
}
public void getHdfsBlockLocations(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(4),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto.getDefaultInstance()));
}
public void shutdownDatanode(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(5),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.getDefaultInstance()));
}
public void getDatanodeInfo(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(6),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.getDefaultInstance()));
}
public void getReconfigurationStatus(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(7),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto.getDefaultInstance()));
}
public void startReconfiguration(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(8),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto.getDefaultInstance()));
}
public void triggerBlockReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(9),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.getDefaultInstance()));
}
}
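// Editorial sketch (not part of the generated file): calling through the stub
// is fire-and-forget; the RpcCallback runs once the channel completes the
// call. "stub", "controller" and "request" are placeholders. By protobuf
// convention the callback typically receives null when the call failed, in
// which case controller.failed() / controller.errorText() describe the error.
//
//   stub.triggerBlockReport(controller, request,
//       new com.google.protobuf.RpcCallback<TriggerBlockReportResponseProto>() {
//         public void run(TriggerBlockReportResponseProto response) {
//           if (response == null) {
//             System.err.println("triggerBlockReport failed: " + controller.errorText());
//           }
//         }
//       });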
public static BlockingInterface newBlockingStub(
com.google.protobuf.BlockingRpcChannel channel) {
return new BlockingStub(channel);
}
public interface BlockingInterface {
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto getReplicaVisibleLength(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto refreshNamenodes(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto deleteBlockPool(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto getBlockLocalPathInfo(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto getHdfsBlockLocations(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto shutdownDatanode(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto getDatanodeInfo(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto getReconfigurationStatus(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto startReconfiguration(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto triggerBlockReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto request)
throws com.google.protobuf.ServiceException;
}
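// Editorial sketch (not part of the generated file): the blocking form is the
// one Hadoop's protobuf RPC engine typically consumes. "channel" is an assumed
// BlockingRpcChannel from the transport layer, and "block" an assumed
// ExtendedBlockProto identifying the replica.
//
//   ClientDatanodeProtocolService.BlockingInterface proxy =
//       ClientDatanodeProtocolService.newBlockingStub(channel);
//   GetReplicaVisibleLengthResponseProto resp =
//       proxy.getReplicaVisibleLength(null,  // controller; may be null for some channels
//           GetReplicaVisibleLengthRequestProto.newBuilder()
//               .setBlock(block)
//               .build());
//   long visibleLength = resp.getLength();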
private static final class BlockingStub implements BlockingInterface {
private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
this.channel = channel;
}
private final com.google.protobuf.BlockingRpcChannel channel;
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto getReplicaVisibleLength(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(0),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance());
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto refreshNamenodes(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(1),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance());
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto deleteBlockPool(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(2),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance());
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto getBlockLocalPathInfo(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(3),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance());
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto getHdfsBlockLocations(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(4),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto.getDefaultInstance());
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto shutdownDatanode(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(5),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.getDefaultInstance());
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto getDatanodeInfo(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(6),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.getDefaultInstance());
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto getReconfigurationStatus(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(7),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto.getDefaultInstance());
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto startReconfiguration(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(8),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto.getDefaultInstance());
}
public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto triggerBlockReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(9),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.getDefaultInstance());
}
}
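// Editorial sketch (not part of the generated file): because BlockingStub only
// forwards to channel.callBlockingMethod() with the matching response
// prototype, an in-process test can wire client to server with a loopback
// channel. "impl" (a BlockingInterface implementation) is assumed.
//
//   final com.google.protobuf.BlockingService local =
//       ClientDatanodeProtocolService.newReflectiveBlockingService(impl);
//   com.google.protobuf.BlockingRpcChannel loopback =
//       new com.google.protobuf.BlockingRpcChannel() {
//         public com.google.protobuf.Message callBlockingMethod(
//             com.google.protobuf.Descriptors.MethodDescriptor method,
//             com.google.protobuf.RpcController controller,
//             com.google.protobuf.Message request,
//             com.google.protobuf.Message responsePrototype)
//             throws com.google.protobuf.ServiceException {
//           return local.callBlockingMethod(method, controller, request);
//         }
//       };
//   ClientDatanodeProtocolService.BlockingInterface client =
//       ClientDatanodeProtocolService.newBlockingStub(loopback);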
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ClientDatanodeProtocolService)
}
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetHdfsBlockLocationsRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_GetHdfsBlockLocationsRequestProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetHdfsBlockLocationsResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_GetHdfsBlockLocationsResponseProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_StartReconfigurationRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_StartReconfigurationRequestProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_StartReconfigurationResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_StartReconfigurationResponseProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetReconfigurationStatusRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_GetReconfigurationStatusRequestProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetReconfigurationStatusConfigChangeProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_GetReconfigurationStatusConfigChangeProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetReconfigurationStatusResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_GetReconfigurationStatusResponseProto_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
return descriptor;
}
private static com.google.protobuf.Descriptors.FileDescriptor
descriptor;
static {
java.lang.String[] descriptorData = {
"\n\034ClientDatanodeProtocol.proto\022\013hadoop.h" +
"dfs\032\016Security.proto\032\nhdfs.proto\"U\n#GetRe" +
"plicaVisibleLengthRequestProto\022.\n\005block\030" +
"\001 \002(\0132\037.hadoop.hdfs.ExtendedBlockProto\"6" +
"\n$GetReplicaVisibleLengthResponseProto\022\016" +
"\n\006length\030\001 \002(\004\"\036\n\034RefreshNamenodesReques" +
"tProto\"\037\n\035RefreshNamenodesResponseProto\"" +
"?\n\033DeleteBlockPoolRequestProto\022\021\n\tblockP" +
"ool\030\001 \002(\t\022\r\n\005force\030\002 \002(\010\"\036\n\034DeleteBlockP" +
"oolResponseProto\"}\n!GetBlockLocalPathInf",
"oRequestProto\022.\n\005block\030\001 \002(\0132\037.hadoop.hd" +
"fs.ExtendedBlockProto\022(\n\005token\030\002 \002(\0132\031.h" +
"adoop.common.TokenProto\"~\n\"GetBlockLocal" +
"PathInfoResponseProto\022.\n\005block\030\001 \002(\0132\037.h" +
"adoop.hdfs.ExtendedBlockProto\022\021\n\tlocalPa" +
"th\030\002 \002(\t\022\025\n\rlocalMetaPath\030\003 \002(\t\"y\n!GetHd" +
"fsBlockLocationsRequestProto\022)\n\006tokens\030\002" +
" \003(\0132\031.hadoop.common.TokenProto\022\023\n\013block" +
"PoolId\030\003 \002(\t\022\024\n\010blockIds\030\004 \003(\020B\002\020\001\"R\n\"Ge" +
"tHdfsBlockLocationsResponseProto\022\021\n\tvolu",
"meIds\030\001 \003(\014\022\031\n\rvolumeIndexes\030\002 \003(\rB\002\020\001\"2" +
"\n\034ShutdownDatanodeRequestProto\022\022\n\nforUpg" +
"rade\030\001 \002(\010\"\037\n\035ShutdownDatanodeResponsePr" +
"oto\"\035\n\033GetDatanodeInfoRequestProto\"V\n\034Ge" +
"tDatanodeInfoResponseProto\0226\n\tlocalInfo\030" +
"\001 \002(\0132#.hadoop.hdfs.DatanodeLocalInfoPro" +
"to\"\"\n StartReconfigurationRequestProto\"#" +
"\n!StartReconfigurationResponseProto\"5\n\036T" +
"riggerBlockReportRequestProto\022\023\n\013increme" +
"ntal\030\001 \002(\010\"!\n\037TriggerBlockReportResponse",
"Proto\"&\n$GetReconfigurationStatusRequest" +
"Proto\"s\n)GetReconfigurationStatusConfigC" +
"hangeProto\022\014\n\004name\030\001 \002(\t\022\020\n\010oldValue\030\002 \002" +
"(\t\022\020\n\010newValue\030\003 \001(\t\022\024\n\014errorMessage\030\004 \001" +
"(\t\"\224\001\n%GetReconfigurationStatusResponseP" +
"roto\022\021\n\tstartTime\030\001 \002(\003\022\017\n\007endTime\030\002 \001(\003" +
"\022G\n\007changes\030\003 \003(\01326.hadoop.hdfs.GetRecon" +
"figurationStatusConfigChangeProto2\245\t\n\035Cl" +
"ientDatanodeProtocolService\022~\n\027getReplic" +
"aVisibleLength\0220.hadoop.hdfs.GetReplicaV",
"isibleLengthRequestProto\0321.hadoop.hdfs.G" +
"etReplicaVisibleLengthResponseProto\022i\n\020r" +
"efreshNamenodes\022).hadoop.hdfs.RefreshNam" +
"enodesRequestProto\032*.hadoop.hdfs.Refresh" +
"NamenodesResponseProto\022f\n\017deleteBlockPoo" +
"l\022(.hadoop.hdfs.DeleteBlockPoolRequestPr" +
"oto\032).hadoop.hdfs.DeleteBlockPoolRespons" +
"eProto\022x\n\025getBlockLocalPathInfo\022..hadoop" +
".hdfs.GetBlockLocalPathInfoRequestProto\032" +
"/.hadoop.hdfs.GetBlockLocalPathInfoRespo",
"nseProto\022x\n\025getHdfsBlockLocations\022..hado" +
"op.hdfs.GetHdfsBlockLocationsRequestProt" +
"o\032/.hadoop.hdfs.GetHdfsBlockLocationsRes" +
"ponseProto\022i\n\020shutdownDatanode\022).hadoop." +
"hdfs.ShutdownDatanodeRequestProto\032*.hado" +
"op.hdfs.ShutdownDatanodeResponseProto\022f\n" +
"\017getDatanodeInfo\022(.hadoop.hdfs.GetDatano" +
"deInfoRequestProto\032).hadoop.hdfs.GetData" +
"nodeInfoResponseProto\022\201\001\n\030getReconfigura" +
"tionStatus\0221.hadoop.hdfs.GetReconfigurat",
"ionStatusRequestProto\0322.hadoop.hdfs.GetR" +
"econfigurationStatusResponseProto\022u\n\024sta" +
"rtReconfiguration\022-.hadoop.hdfs.StartRec" +
"onfigurationRequestProto\032..hadoop.hdfs.S" +
"tartReconfigurationResponseProto\022o\n\022trig" +
"gerBlockReport\022+.hadoop.hdfs.TriggerBloc" +
"kReportRequestProto\032,.hadoop.hdfs.Trigge" +
"rBlockReportResponseProtoBK\n%org.apache." +
"hadoop.hdfs.protocol.protoB\034ClientDatano" +
"deProtocolProtos\210\001\001\240\001\001"
};
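// Editorial note: descriptorData above is the serialized FileDescriptorProto
// of ClientDatanodeProtocol.proto, stored as escaped string chunks (the array
// split keeps each constant under the class-file string-literal limit). A
// sketch of decoding it by hand -- internalBuildGeneratedFileFrom() below does
// the equivalent internally; ISO-8859-1 is the charset protobuf uses for
// these literals:
//
//   StringBuilder sb = new StringBuilder();
//   for (String chunk : descriptorData) sb.append(chunk);
//   com.google.protobuf.DescriptorProtos.FileDescriptorProto fdp =
//       com.google.protobuf.DescriptorProtos.FileDescriptorProto.parseFrom(
//           sb.toString().getBytes(java.nio.charset.StandardCharsets.ISO_8859_1));
//   // fdp.getName()         -> "ClientDatanodeProtocol.proto"
//   // fdp.getServiceCount() -> 1 (ClientDatanodeProtocolService)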
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
public com.google.protobuf.ExtensionRegistry assignDescriptors(
com.google.protobuf.Descriptors.FileDescriptor root) {
descriptor = root;
internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_descriptor =
getDescriptor().getMessageTypes().get(0);
internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_descriptor,
new java.lang.String[] { "Block", });
internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_descriptor =
getDescriptor().getMessageTypes().get(1);
internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_descriptor,
new java.lang.String[] { "Length", });
internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_descriptor =
getDescriptor().getMessageTypes().get(2);
internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_descriptor =
getDescriptor().getMessageTypes().get(3);
internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_descriptor =
getDescriptor().getMessageTypes().get(4);
internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_descriptor,
new java.lang.String[] { "BlockPool", "Force", });
internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_descriptor =
getDescriptor().getMessageTypes().get(5);
internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_descriptor =
getDescriptor().getMessageTypes().get(6);
internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_descriptor,
new java.lang.String[] { "Block", "Token", });
internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_descriptor =
getDescriptor().getMessageTypes().get(7);
internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_descriptor,
new java.lang.String[] { "Block", "LocalPath", "LocalMetaPath", });
internal_static_hadoop_hdfs_GetHdfsBlockLocationsRequestProto_descriptor =
getDescriptor().getMessageTypes().get(8);
internal_static_hadoop_hdfs_GetHdfsBlockLocationsRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_GetHdfsBlockLocationsRequestProto_descriptor,
new java.lang.String[] { "Tokens", "BlockPoolId", "BlockIds", });
internal_static_hadoop_hdfs_GetHdfsBlockLocationsResponseProto_descriptor =
getDescriptor().getMessageTypes().get(9);
internal_static_hadoop_hdfs_GetHdfsBlockLocationsResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_GetHdfsBlockLocationsResponseProto_descriptor,
new java.lang.String[] { "VolumeIds", "VolumeIndexes", });
internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_descriptor =
getDescriptor().getMessageTypes().get(10);
internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_descriptor,
new java.lang.String[] { "ForUpgrade", });
internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_descriptor =
getDescriptor().getMessageTypes().get(11);
internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_descriptor =
getDescriptor().getMessageTypes().get(12);
internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_descriptor =
getDescriptor().getMessageTypes().get(13);
internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_descriptor,
new java.lang.String[] { "LocalInfo", });
internal_static_hadoop_hdfs_StartReconfigurationRequestProto_descriptor =
getDescriptor().getMessageTypes().get(14);
internal_static_hadoop_hdfs_StartReconfigurationRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_StartReconfigurationRequestProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_StartReconfigurationResponseProto_descriptor =
getDescriptor().getMessageTypes().get(15);
internal_static_hadoop_hdfs_StartReconfigurationResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_StartReconfigurationResponseProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_descriptor =
getDescriptor().getMessageTypes().get(16);
internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_descriptor,
new java.lang.String[] { "Incremental", });
internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_descriptor =
getDescriptor().getMessageTypes().get(17);
internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_GetReconfigurationStatusRequestProto_descriptor =
getDescriptor().getMessageTypes().get(18);
internal_static_hadoop_hdfs_GetReconfigurationStatusRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_GetReconfigurationStatusRequestProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_GetReconfigurationStatusConfigChangeProto_descriptor =
getDescriptor().getMessageTypes().get(19);
internal_static_hadoop_hdfs_GetReconfigurationStatusConfigChangeProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_GetReconfigurationStatusConfigChangeProto_descriptor,
new java.lang.String[] { "Name", "OldValue", "NewValue", "ErrorMessage", });
internal_static_hadoop_hdfs_GetReconfigurationStatusResponseProto_descriptor =
getDescriptor().getMessageTypes().get(20);
internal_static_hadoop_hdfs_GetReconfigurationStatusResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_GetReconfigurationStatusResponseProto_descriptor,
new java.lang.String[] { "StartTime", "EndTime", "Changes", });
return null;
}
};
com.google.protobuf.Descriptors.FileDescriptor
.internalBuildGeneratedFileFrom(descriptorData,
new com.google.protobuf.Descriptors.FileDescriptor[] {
org.apache.hadoop.security.proto.SecurityProtos.getDescriptor(),
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
}, assigner);
}
// @@protoc_insertion_point(outer_class_scope)
}