// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: DatanodeProtocol.proto
package org.apache.hadoop.hdfs.protocol.proto;
public final class DatanodeProtocolProtos {
private DatanodeProtocolProtos() {}
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
}
public interface DatanodeRegistrationProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;
/**
* required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;
*
*
* Datanode information
*
*/
boolean hasDatanodeID();
/**
* required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;
*
*
* Datanode information
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodeID();
/**
* required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;
*
*
* Datanode information
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeIDOrBuilder();
// required .hadoop.hdfs.StorageInfoProto storageInfo = 2;
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 2;
*
*
* Node information
*
*/
boolean hasStorageInfo();
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 2;
*
*
* Node information
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto getStorageInfo();
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 2;
*
*
* Node information
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder();
// required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;
*
*
* Block keys
*
*/
boolean hasKeys();
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;
*
*
* Block keys
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto getKeys();
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;
*
*
* Block keys
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder();
// required string softwareVersion = 4;
/**
* required string softwareVersion = 4;
*
*
* Software version of the DN, e.g. "2.0.0"
*
*/
boolean hasSoftwareVersion();
/**
* required string softwareVersion = 4;
*
*
* Software version of the DN, e.g. "2.0.0"
*
*/
java.lang.String getSoftwareVersion();
/**
* required string softwareVersion = 4;
*
*
* Software version of the DN, e.g. "2.0.0"
*
*/
com.google.protobuf.ByteString
getSoftwareVersionBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.DatanodeRegistrationProto}
*
*
**
* Information to identify a datanode to a namenode
*
*/
public static final class DatanodeRegistrationProto extends
com.google.protobuf.GeneratedMessage
implements DatanodeRegistrationProtoOrBuilder {
// Use DatanodeRegistrationProto.newBuilder() to construct.
private DatanodeRegistrationProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private DatanodeRegistrationProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final DatanodeRegistrationProto defaultInstance;
public static DatanodeRegistrationProto getDefaultInstance() {
return defaultInstance;
}
public DatanodeRegistrationProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DatanodeRegistrationProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = datanodeID_.toBuilder();
}
datanodeID_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(datanodeID_);
datanodeID_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
subBuilder = storageInfo_.toBuilder();
}
storageInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(storageInfo_);
storageInfo_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
case 26: {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) == 0x00000004)) {
subBuilder = keys_.toBuilder();
}
keys_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(keys_);
keys_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
case 34: {
bitField0_ |= 0x00000008;
softwareVersion_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_DatanodeRegistrationProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_DatanodeRegistrationProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder.class);
}
public static com.google.protobuf.Parser<DatanodeRegistrationProto> PARSER =
new com.google.protobuf.AbstractParser<DatanodeRegistrationProto>() {
public DatanodeRegistrationProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new DatanodeRegistrationProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<DatanodeRegistrationProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;
public static final int DATANODEID_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto datanodeID_;
/**
* required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;
*
*
* Datanode information
*
*/
public boolean hasDatanodeID() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;
*
*
* Datanode information
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodeID() {
return datanodeID_;
}
/**
* required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;
*
*
* Datanode information
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeIDOrBuilder() {
return datanodeID_;
}
// required .hadoop.hdfs.StorageInfoProto storageInfo = 2;
public static final int STORAGEINFO_FIELD_NUMBER = 2;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto storageInfo_;
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 2;
*
*
* Node information
*
*/
public boolean hasStorageInfo() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 2;
*
*
* Node information
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto getStorageInfo() {
return storageInfo_;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 2;
*
*
* Node information
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() {
return storageInfo_;
}
// required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;
public static final int KEYS_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto keys_;
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;
*
*
* Block keys
*
*/
public boolean hasKeys() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;
*
*
* Block keys
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto getKeys() {
return keys_;
}
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;
*
*
* Block keys
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder() {
return keys_;
}
// required string softwareVersion = 4;
public static final int SOFTWAREVERSION_FIELD_NUMBER = 4;
private java.lang.Object softwareVersion_;
/**
* required string softwareVersion = 4;
*
*
* Software version of the DN, e.g. "2.0.0"
*
*/
public boolean hasSoftwareVersion() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required string softwareVersion = 4;
*
*
* Software version of the DN, e.g. "2.0.0"
*
*/
public java.lang.String getSoftwareVersion() {
java.lang.Object ref = softwareVersion_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
softwareVersion_ = s;
}
return s;
}
}
/**
* required string softwareVersion = 4;
*
*
* Software version of the DN, e.g. "2.0.0"
*
*/
public com.google.protobuf.ByteString
getSoftwareVersionBytes() {
java.lang.Object ref = softwareVersion_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
softwareVersion_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
datanodeID_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance();
storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance();
keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.getDefaultInstance();
softwareVersion_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasDatanodeID()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasStorageInfo()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasKeys()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasSoftwareVersion()) {
memoizedIsInitialized = 0;
return false;
}
if (!getDatanodeID().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
if (!getStorageInfo().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
if (!getKeys().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, datanodeID_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeMessage(2, storageInfo_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeMessage(3, keys_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeBytes(4, getSoftwareVersionBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, datanodeID_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, storageInfo_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, keys_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(4, getSoftwareVersionBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto) obj;
boolean result = true;
result = result && (hasDatanodeID() == other.hasDatanodeID());
if (hasDatanodeID()) {
result = result && getDatanodeID()
.equals(other.getDatanodeID());
}
result = result && (hasStorageInfo() == other.hasStorageInfo());
if (hasStorageInfo()) {
result = result && getStorageInfo()
.equals(other.getStorageInfo());
}
result = result && (hasKeys() == other.hasKeys());
if (hasKeys()) {
result = result && getKeys()
.equals(other.getKeys());
}
result = result && (hasSoftwareVersion() == other.hasSoftwareVersion());
if (hasSoftwareVersion()) {
result = result && getSoftwareVersion()
.equals(other.getSoftwareVersion());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasDatanodeID()) {
hash = (37 * hash) + DATANODEID_FIELD_NUMBER;
hash = (53 * hash) + getDatanodeID().hashCode();
}
if (hasStorageInfo()) {
hash = (37 * hash) + STORAGEINFO_FIELD_NUMBER;
hash = (53 * hash) + getStorageInfo().hashCode();
}
if (hasKeys()) {
hash = (37 * hash) + KEYS_FIELD_NUMBER;
hash = (53 * hash) + getKeys().hashCode();
}
if (hasSoftwareVersion()) {
hash = (37 * hash) + SOFTWAREVERSION_FIELD_NUMBER;
hash = (53 * hash) + getSoftwareVersion().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.DatanodeRegistrationProto}
*
*
**
* Information to identify a datanode to a namenode
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_DatanodeRegistrationProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_DatanodeRegistrationProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getDatanodeIDFieldBuilder();
getStorageInfoFieldBuilder();
getKeysFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (datanodeIDBuilder_ == null) {
datanodeID_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance();
} else {
datanodeIDBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
if (storageInfoBuilder_ == null) {
storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance();
} else {
storageInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
if (keysBuilder_ == null) {
keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.getDefaultInstance();
} else {
keysBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
softwareVersion_ = "";
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_DatanodeRegistrationProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (datanodeIDBuilder_ == null) {
result.datanodeID_ = datanodeID_;
} else {
result.datanodeID_ = datanodeIDBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
if (storageInfoBuilder_ == null) {
result.storageInfo_ = storageInfo_;
} else {
result.storageInfo_ = storageInfoBuilder_.build();
}
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
if (keysBuilder_ == null) {
result.keys_ = keys_;
} else {
result.keys_ = keysBuilder_.build();
}
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.softwareVersion_ = softwareVersion_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) return this;
if (other.hasDatanodeID()) {
mergeDatanodeID(other.getDatanodeID());
}
if (other.hasStorageInfo()) {
mergeStorageInfo(other.getStorageInfo());
}
if (other.hasKeys()) {
mergeKeys(other.getKeys());
}
if (other.hasSoftwareVersion()) {
bitField0_ |= 0x00000008;
softwareVersion_ = other.softwareVersion_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasDatanodeID()) {
return false;
}
if (!hasStorageInfo()) {
return false;
}
if (!hasKeys()) {
return false;
}
if (!hasSoftwareVersion()) {
return false;
}
if (!getDatanodeID().isInitialized()) {
return false;
}
if (!getStorageInfo().isInitialized()) {
return false;
}
if (!getKeys().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto datanodeID_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> datanodeIDBuilder_;
/**
* required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;
*
*
* Datanode information
*
*/
public boolean hasDatanodeID() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;
*
*
* Datanode information
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodeID() {
if (datanodeIDBuilder_ == null) {
return datanodeID_;
} else {
return datanodeIDBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;
*
*
* Datanode information
*
*/
public Builder setDatanodeID(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
if (datanodeIDBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
datanodeID_ = value;
onChanged();
} else {
datanodeIDBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;
*
*
* Datanode information
*
*/
public Builder setDatanodeID(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
if (datanodeIDBuilder_ == null) {
datanodeID_ = builderForValue.build();
onChanged();
} else {
datanodeIDBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;
*
*
* Datanode information
*
*/
public Builder mergeDatanodeID(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
if (datanodeIDBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
datanodeID_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()) {
datanodeID_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder(datanodeID_).mergeFrom(value).buildPartial();
} else {
datanodeID_ = value;
}
onChanged();
} else {
datanodeIDBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;
*
*
* Datanode information
*
*/
public Builder clearDatanodeID() {
if (datanodeIDBuilder_ == null) {
datanodeID_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance();
onChanged();
} else {
datanodeIDBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;
*
*
* Datanode information
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder getDatanodeIDBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getDatanodeIDFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;
*
*
* Datanode information
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeIDOrBuilder() {
if (datanodeIDBuilder_ != null) {
return datanodeIDBuilder_.getMessageOrBuilder();
} else {
return datanodeID_;
}
}
/**
* required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;
*
*
* Datanode information
*
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>
getDatanodeIDFieldBuilder() {
if (datanodeIDBuilder_ == null) {
datanodeIDBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>(
datanodeID_,
getParentForChildren(),
isClean());
datanodeID_ = null;
}
return datanodeIDBuilder_;
}
// required .hadoop.hdfs.StorageInfoProto storageInfo = 2;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder> storageInfoBuilder_;
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 2;
*
*
* Node information
*
*/
public boolean hasStorageInfo() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 2;
*
*
* Node information
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto getStorageInfo() {
if (storageInfoBuilder_ == null) {
return storageInfo_;
} else {
return storageInfoBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 2;
*
*
* Node information
*
*/
public Builder setStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto value) {
if (storageInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
storageInfo_ = value;
onChanged();
} else {
storageInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 2;
*
*
* Node information
*
*/
public Builder setStorageInfo(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder builderForValue) {
if (storageInfoBuilder_ == null) {
storageInfo_ = builderForValue.build();
onChanged();
} else {
storageInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 2;
*
*
* Node information
*
*/
public Builder mergeStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto value) {
if (storageInfoBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002) &&
storageInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance()) {
storageInfo_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.newBuilder(storageInfo_).mergeFrom(value).buildPartial();
} else {
storageInfo_ = value;
}
onChanged();
} else {
storageInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 2;
*
*
* Node information
*
*/
public Builder clearStorageInfo() {
if (storageInfoBuilder_ == null) {
storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance();
onChanged();
} else {
storageInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 2;
*
*
* Node information
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder getStorageInfoBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getStorageInfoFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 2;
*
*
* Node information
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() {
if (storageInfoBuilder_ != null) {
return storageInfoBuilder_.getMessageOrBuilder();
} else {
return storageInfo_;
}
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 2;
*
*
* Node information
*
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder>
getStorageInfoFieldBuilder() {
if (storageInfoBuilder_ == null) {
storageInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder>(
storageInfo_,
getParentForChildren(),
isClean());
storageInfo_ = null;
}
return storageInfoBuilder_;
}
// required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder> keysBuilder_;
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;
*
*
* Block keys
*
*/
public boolean hasKeys() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;
*
*
* Block keys
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto getKeys() {
if (keysBuilder_ == null) {
return keys_;
} else {
return keysBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;
*
*
* Block keys
*
*/
public Builder setKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto value) {
if (keysBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
keys_ = value;
onChanged();
} else {
keysBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;
*
*
* Block keys
*
*/
public Builder setKeys(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder builderForValue) {
if (keysBuilder_ == null) {
keys_ = builderForValue.build();
onChanged();
} else {
keysBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
return this;
}
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;
*
*
* Block keys
*
*/
public Builder mergeKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto value) {
if (keysBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004) &&
keys_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.getDefaultInstance()) {
keys_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.newBuilder(keys_).mergeFrom(value).buildPartial();
} else {
keys_ = value;
}
onChanged();
} else {
keysBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;
*
*
* Block keys
*
*/
public Builder clearKeys() {
if (keysBuilder_ == null) {
keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.getDefaultInstance();
onChanged();
} else {
keysBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;
*
*
* Block keys
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder getKeysBuilder() {
bitField0_ |= 0x00000004;
onChanged();
return getKeysFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;
*
*
* Block keys
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder() {
if (keysBuilder_ != null) {
return keysBuilder_.getMessageOrBuilder();
} else {
return keys_;
}
}
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;
*
*
* Block keys
*
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder>
getKeysFieldBuilder() {
if (keysBuilder_ == null) {
keysBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder>(
keys_,
getParentForChildren(),
isClean());
keys_ = null;
}
return keysBuilder_;
}
// required string softwareVersion = 4;
private java.lang.Object softwareVersion_ = "";
/**
* required string softwareVersion = 4;
*
*
* Software version of the DN, e.g. "2.0.0"
*
*/
public boolean hasSoftwareVersion() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required string softwareVersion = 4;
*
*
* Software version of the DN, e.g. "2.0.0"
*
*/
public java.lang.String getSoftwareVersion() {
java.lang.Object ref = softwareVersion_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
softwareVersion_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string softwareVersion = 4;
*
*
* Software version of the DN, e.g. "2.0.0"
*
*/
public com.google.protobuf.ByteString
getSoftwareVersionBytes() {
java.lang.Object ref = softwareVersion_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
softwareVersion_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string softwareVersion = 4;
*
*
* Software version of the DN, e.g. "2.0.0"
*
*/
public Builder setSoftwareVersion(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
softwareVersion_ = value;
onChanged();
return this;
}
/**
* required string softwareVersion = 4;
*
*
* Software version of the DN, e.g. "2.0.0"
*
*/
public Builder clearSoftwareVersion() {
bitField0_ = (bitField0_ & ~0x00000008);
softwareVersion_ = getDefaultInstance().getSoftwareVersion();
onChanged();
return this;
}
/**
* required string softwareVersion = 4;
*
*
* Software version of the DN, e.g. "2.0.0"
*
*/
public Builder setSoftwareVersionBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
softwareVersion_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.DatanodeRegistrationProto)
}
static {
defaultInstance = new DatanodeRegistrationProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.DatanodeRegistrationProto)
}
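// A minimal usage sketch (added for illustration; not part of the generated code):
// building a DatanodeRegistrationProto with its Builder, serializing it, and parsing
// it back. The getDefaultInstance() sub-messages and the "2.0.0" version string are
// placeholders only; a real caller must supply fully populated DatanodeIDProto,
// StorageInfoProto and ExportedBlockKeysProto values, since build() checks that all
// required fields (including those nested in the sub-messages) are set.
//
//   DatanodeRegistrationProto reg = DatanodeRegistrationProto.newBuilder()
//       .setDatanodeID(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance())
//       .setStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance())
//       .setKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.getDefaultInstance())
//       .setSoftwareVersion("2.0.0")
//       .build();
//   byte[] wire = reg.toByteArray();
//   DatanodeRegistrationProto parsed = DatanodeRegistrationProto.parseFrom(wire);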
public interface DatanodeCommandProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.datanode.DatanodeCommandProto.Type cmdType = 1;
/**
* required .hadoop.hdfs.datanode.DatanodeCommandProto.Type cmdType = 1;
*
*
* Type of the command
*
*/
boolean hasCmdType();
/**
* required .hadoop.hdfs.datanode.DatanodeCommandProto.Type cmdType = 1;
*
*
* Type of the command
*
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type getCmdType();
// optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;
/**
* optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;
*
*
* One of the following commands is available when the corresponding
* cmdType is set
*
*/
boolean hasBalancerCmd();
/**
* optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;
*
*
* One of the following commands is available when the corresponding
* cmdType is set
*
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto getBalancerCmd();
/**
* optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;
*
*
* One of the following commands is available when the corresponding
* cmdType is set
*
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder getBalancerCmdOrBuilder();
// optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;
/**
* optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;
*/
boolean hasBlkCmd();
/**
* optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto getBlkCmd();
/**
* optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder getBlkCmdOrBuilder();
// optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;
/**
* optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;
*/
boolean hasRecoveryCmd();
/**
* optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto getRecoveryCmd();
/**
* optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProtoOrBuilder getRecoveryCmdOrBuilder();
// optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;
/**
* optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;
*/
boolean hasFinalizeCmd();
/**
* optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto getFinalizeCmd();
/**
* optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder getFinalizeCmdOrBuilder();
// optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;
/**
* optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;
*/
boolean hasKeyUpdateCmd();
/**
* optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto getKeyUpdateCmd();
/**
* optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder getKeyUpdateCmdOrBuilder();
// optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;
/**
* optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;
*/
boolean hasRegisterCmd();
/**
* optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto getRegisterCmd();
/**
* optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder getRegisterCmdOrBuilder();
// optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;
/**
* optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;
*/
boolean hasBlkIdCmd();
/**
* optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto getBlkIdCmd();
/**
* optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProtoOrBuilder getBlkIdCmdOrBuilder();
}
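// Illustration only (not generated code): a DatanodeCommandProto carries a single
// payload selected by cmdType, so a receiver typically switches on getCmdType() and
// reads the matching optional field. The handler methods named below are hypothetical.
//
//   DatanodeCommandProto cmd = ...;
//   switch (cmd.getCmdType()) {
//     case BlockCommand:
//       if (cmd.hasBlkCmd()) {
//         handleBlockCommand(cmd.getBlkCmd());        // hypothetical handler
//       }
//       break;
//     case RegisterCommand:
//       handleRegister(cmd.getRegisterCmd());         // hypothetical handler
//       break;
//     default:
//       // BalancerBandwidthCommand, BlockRecoveryCommand, FinalizeCommand,
//       // KeyUpdateCommand, BlockIdCommand, NullDatanodeCommand handled similarly
//       break;
//   }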
/**
* Protobuf type {@code hadoop.hdfs.datanode.DatanodeCommandProto}
*
*
**
* Commands sent from namenode to the datanodes
*
*/
public static final class DatanodeCommandProto extends
com.google.protobuf.GeneratedMessage
implements DatanodeCommandProtoOrBuilder {
// Use DatanodeCommandProto.newBuilder() to construct.
private DatanodeCommandProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private DatanodeCommandProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final DatanodeCommandProto defaultInstance;
public static DatanodeCommandProto getDefaultInstance() {
return defaultInstance;
}
public DatanodeCommandProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DatanodeCommandProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type value = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
cmdType_ = value;
}
break;
}
case 18: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
subBuilder = balancerCmd_.toBuilder();
}
balancerCmd_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(balancerCmd_);
balancerCmd_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
case 26: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) == 0x00000004)) {
subBuilder = blkCmd_.toBuilder();
}
blkCmd_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(blkCmd_);
blkCmd_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
case 34: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000008) == 0x00000008)) {
subBuilder = recoveryCmd_.toBuilder();
}
recoveryCmd_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(recoveryCmd_);
recoveryCmd_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000008;
break;
}
case 42: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000010) == 0x00000010)) {
subBuilder = finalizeCmd_.toBuilder();
}
finalizeCmd_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(finalizeCmd_);
finalizeCmd_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000010;
break;
}
case 50: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000020) == 0x00000020)) {
subBuilder = keyUpdateCmd_.toBuilder();
}
keyUpdateCmd_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(keyUpdateCmd_);
keyUpdateCmd_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000020;
break;
}
case 58: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000040) == 0x00000040)) {
subBuilder = registerCmd_.toBuilder();
}
registerCmd_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(registerCmd_);
registerCmd_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000040;
break;
}
case 66: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000080) == 0x00000080)) {
subBuilder = blkIdCmd_.toBuilder();
}
blkIdCmd_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(blkIdCmd_);
blkIdCmd_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000080;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_DatanodeCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_DatanodeCommandProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder.class);
}
public static com.google.protobuf.Parser<DatanodeCommandProto> PARSER =
new com.google.protobuf.AbstractParser<DatanodeCommandProto>() {
public DatanodeCommandProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new DatanodeCommandProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<DatanodeCommandProto> getParserForType() {
return PARSER;
}
/**
* Protobuf enum {@code hadoop.hdfs.datanode.DatanodeCommandProto.Type}
*/
public enum Type
implements com.google.protobuf.ProtocolMessageEnum {
/**
* BalancerBandwidthCommand = 0;
*/
BalancerBandwidthCommand(0, 0),
/**
* BlockCommand = 1;
*/
BlockCommand(1, 1),
/**
* BlockRecoveryCommand = 2;
*/
BlockRecoveryCommand(2, 2),
/**
* FinalizeCommand = 3;
*/
FinalizeCommand(3, 3),
/**
* KeyUpdateCommand = 4;
*/
KeyUpdateCommand(4, 4),
/**
* RegisterCommand = 5;
*/
RegisterCommand(5, 5),
/**
* UnusedUpgradeCommand = 6;
*/
UnusedUpgradeCommand(6, 6),
/**
* NullDatanodeCommand = 7;
*/
NullDatanodeCommand(7, 7),
/**
* BlockIdCommand = 8;
*/
BlockIdCommand(8, 8),
;
/**
* BalancerBandwidthCommand = 0;
*/
public static final int BalancerBandwidthCommand_VALUE = 0;
/**
* BlockCommand = 1;
*/
public static final int BlockCommand_VALUE = 1;
/**
* BlockRecoveryCommand = 2;
*/
public static final int BlockRecoveryCommand_VALUE = 2;
/**
* FinalizeCommand = 3;
*/
public static final int FinalizeCommand_VALUE = 3;
/**
* KeyUpdateCommand = 4;
*/
public static final int KeyUpdateCommand_VALUE = 4;
/**
* RegisterCommand = 5;
*/
public static final int RegisterCommand_VALUE = 5;
/**
* UnusedUpgradeCommand = 6;
*/
public static final int UnusedUpgradeCommand_VALUE = 6;
/**
* NullDatanodeCommand = 7;
*/
public static final int NullDatanodeCommand_VALUE = 7;
/**
* BlockIdCommand = 8;
*/
public static final int BlockIdCommand_VALUE = 8;
public final int getNumber() { return value; }
public static Type valueOf(int value) {
switch (value) {
case 0: return BalancerBandwidthCommand;
case 1: return BlockCommand;
case 2: return BlockRecoveryCommand;
case 3: return FinalizeCommand;
case 4: return KeyUpdateCommand;
case 5: return RegisterCommand;
case 6: return UnusedUpgradeCommand;
case 7: return NullDatanodeCommand;
case 8: return BlockIdCommand;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<Type>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<Type>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<Type>() {
public Type findValueByNumber(int number) {
return Type.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDescriptor().getEnumTypes().get(0);
}
private static final Type[] VALUES = values();
public static Type valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private Type(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.datanode.DatanodeCommandProto.Type)
}
private int bitField0_;
// required .hadoop.hdfs.datanode.DatanodeCommandProto.Type cmdType = 1;
public static final int CMDTYPE_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type cmdType_;
/**
* required .hadoop.hdfs.datanode.DatanodeCommandProto.Type cmdType = 1;
*
*
* Type of the command
*
*/
public boolean hasCmdType() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.datanode.DatanodeCommandProto.Type cmdType = 1;
*
*
* Type of the command
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type getCmdType() {
return cmdType_;
}
// optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;
public static final int BALANCERCMD_FIELD_NUMBER = 2;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto balancerCmd_;
/**
* optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;
*
*
* One of the following commands is available when the corresponding
* cmdType is set
*
*/
public boolean hasBalancerCmd() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;
*
*
* One of the following commands is available when the corresponding
* cmdType is set
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto getBalancerCmd() {
return balancerCmd_;
}
/**
* optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;
*
*
* One of the following commands is available when the corresponding
* cmdType is set
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder getBalancerCmdOrBuilder() {
return balancerCmd_;
}
// optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;
public static final int BLKCMD_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto blkCmd_;
/**
* optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;
*/
public boolean hasBlkCmd() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto getBlkCmd() {
return blkCmd_;
}
/**
* optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder getBlkCmdOrBuilder() {
return blkCmd_;
}
// optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;
public static final int RECOVERYCMD_FIELD_NUMBER = 4;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto recoveryCmd_;
/**
* optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;
*/
public boolean hasRecoveryCmd() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto getRecoveryCmd() {
return recoveryCmd_;
}
/**
* optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProtoOrBuilder getRecoveryCmdOrBuilder() {
return recoveryCmd_;
}
// optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;
public static final int FINALIZECMD_FIELD_NUMBER = 5;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto finalizeCmd_;
/**
* optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;
*/
public boolean hasFinalizeCmd() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto getFinalizeCmd() {
return finalizeCmd_;
}
/**
* optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder getFinalizeCmdOrBuilder() {
return finalizeCmd_;
}
// optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;
public static final int KEYUPDATECMD_FIELD_NUMBER = 6;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto keyUpdateCmd_;
/**
* optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;
*/
public boolean hasKeyUpdateCmd() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto getKeyUpdateCmd() {
return keyUpdateCmd_;
}
/**
* optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder getKeyUpdateCmdOrBuilder() {
return keyUpdateCmd_;
}
// optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;
public static final int REGISTERCMD_FIELD_NUMBER = 7;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto registerCmd_;
/**
* optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;
*/
public boolean hasRegisterCmd() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto getRegisterCmd() {
return registerCmd_;
}
/**
* optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder getRegisterCmdOrBuilder() {
return registerCmd_;
}
// optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;
public static final int BLKIDCMD_FIELD_NUMBER = 8;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto blkIdCmd_;
/**
* optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;
*/
public boolean hasBlkIdCmd() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto getBlkIdCmd() {
return blkIdCmd_;
}
/**
* optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProtoOrBuilder getBlkIdCmdOrBuilder() {
return blkIdCmd_;
}
private void initFields() {
cmdType_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.BalancerBandwidthCommand;
balancerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance();
blkCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance();
recoveryCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance();
finalizeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance();
keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
registerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance();
blkIdCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasCmdType()) {
memoizedIsInitialized = 0;
return false;
}
if (hasBalancerCmd()) {
if (!getBalancerCmd().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasBlkCmd()) {
if (!getBlkCmd().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasRecoveryCmd()) {
if (!getRecoveryCmd().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasFinalizeCmd()) {
if (!getFinalizeCmd().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasKeyUpdateCmd()) {
if (!getKeyUpdateCmd().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasBlkIdCmd()) {
if (!getBlkIdCmd().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
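// Note (illustrative): isInitialized() enforces the required cmdType field and recursively
// validates whichever sub-commands are set; Builder.build() throws an
// UninitializedMessageException when it returns false, while buildPartial() skips the check.
// A minimal sketch:
//
//   DatanodeCommandProto.Builder b = DatanodeCommandProto.newBuilder();
//   // b.build();                    // would throw: required cmdType is unset
//   DatanodeCommandProto ok = b
//       .setCmdType(DatanodeCommandProto.Type.RegisterCommand)
//       .setRegisterCmd(RegisterCommandProto.getDefaultInstance())
//       .build();                    // succeeds once the required field is present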
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, cmdType_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeMessage(2, balancerCmd_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeMessage(3, blkCmd_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeMessage(4, recoveryCmd_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeMessage(5, finalizeCmd_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeMessage(6, keyUpdateCmd_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeMessage(7, registerCmd_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
output.writeMessage(8, blkIdCmd_);
}
getUnknownFields().writeTo(output);
}
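// Serialization sketch (illustrative): writeTo() emits only fields whose presence bits are
// set, in field-number order. Callers usually rely on the standard protobuf helpers rather
// than a raw CodedOutputStream; given a built DatanodeCommandProto cmd and a hypothetical
// OutputStream out:
//
//   byte[] wire = cmd.toByteArray();   // buffers writeTo() output into a byte array
//   cmd.writeDelimitedTo(out);         // length-prefixed variant for streaming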
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, cmdType_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, balancerCmd_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, blkCmd_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(4, recoveryCmd_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(5, finalizeCmd_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(6, keyUpdateCmd_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(7, registerCmd_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(8, blkIdCmd_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
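// Note (illustrative): the serialized size is memoized, which is safe because a built
// message is immutable. When writing through a raw CodedOutputStream, the size is typically
// used to pre-allocate the buffer; given a built DatanodeCommandProto cmd:
//
//   byte[] buf = new byte[cmd.getSerializedSize()];
//   com.google.protobuf.CodedOutputStream cos = com.google.protobuf.CodedOutputStream.newInstance(buf);
//   cmd.writeTo(cos);
//   cos.checkNoSpaceLeft();            // verifies the computed size matched what was written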
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto) obj;
boolean result = true;
result = result && (hasCmdType() == other.hasCmdType());
if (hasCmdType()) {
result = result &&
(getCmdType() == other.getCmdType());
}
result = result && (hasBalancerCmd() == other.hasBalancerCmd());
if (hasBalancerCmd()) {
result = result && getBalancerCmd()
.equals(other.getBalancerCmd());
}
result = result && (hasBlkCmd() == other.hasBlkCmd());
if (hasBlkCmd()) {
result = result && getBlkCmd()
.equals(other.getBlkCmd());
}
result = result && (hasRecoveryCmd() == other.hasRecoveryCmd());
if (hasRecoveryCmd()) {
result = result && getRecoveryCmd()
.equals(other.getRecoveryCmd());
}
result = result && (hasFinalizeCmd() == other.hasFinalizeCmd());
if (hasFinalizeCmd()) {
result = result && getFinalizeCmd()
.equals(other.getFinalizeCmd());
}
result = result && (hasKeyUpdateCmd() == other.hasKeyUpdateCmd());
if (hasKeyUpdateCmd()) {
result = result && getKeyUpdateCmd()
.equals(other.getKeyUpdateCmd());
}
result = result && (hasRegisterCmd() == other.hasRegisterCmd());
if (hasRegisterCmd()) {
result = result && getRegisterCmd()
.equals(other.getRegisterCmd());
}
result = result && (hasBlkIdCmd() == other.hasBlkIdCmd());
if (hasBlkIdCmd()) {
result = result && getBlkIdCmd()
.equals(other.getBlkIdCmd());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasCmdType()) {
hash = (37 * hash) + CMDTYPE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getCmdType());
}
if (hasBalancerCmd()) {
hash = (37 * hash) + BALANCERCMD_FIELD_NUMBER;
hash = (53 * hash) + getBalancerCmd().hashCode();
}
if (hasBlkCmd()) {
hash = (37 * hash) + BLKCMD_FIELD_NUMBER;
hash = (53 * hash) + getBlkCmd().hashCode();
}
if (hasRecoveryCmd()) {
hash = (37 * hash) + RECOVERYCMD_FIELD_NUMBER;
hash = (53 * hash) + getRecoveryCmd().hashCode();
}
if (hasFinalizeCmd()) {
hash = (37 * hash) + FINALIZECMD_FIELD_NUMBER;
hash = (53 * hash) + getFinalizeCmd().hashCode();
}
if (hasKeyUpdateCmd()) {
hash = (37 * hash) + KEYUPDATECMD_FIELD_NUMBER;
hash = (53 * hash) + getKeyUpdateCmd().hashCode();
}
if (hasRegisterCmd()) {
hash = (37 * hash) + REGISTERCMD_FIELD_NUMBER;
hash = (53 * hash) + getRegisterCmd().hashCode();
}
if (hasBlkIdCmd()) {
hash = (37 * hash) + BLKIDCMD_FIELD_NUMBER;
hash = (53 * hash) + getBlkIdCmd().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
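// Note (illustrative): the hash mixes each set field's number and value, so messages that
// compare equal() hash equally, and the result is memoized. That makes commands usable as
// set/map keys:
//
//   java.util.Set<DatanodeCommandProto> seen = new java.util.HashSet<DatanodeCommandProto>();
//   seen.add(cmd);                     // cmd is a previously built DatanodeCommandProto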
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
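// Deserialization sketch (illustrative): the parseFrom() overloads above invert writeTo(),
// so a round trip looks like
//
//   byte[] wire = cmd.toByteArray();
//   DatanodeCommandProto decoded = DatanodeCommandProto.parseFrom(wire);
//   assert decoded.equals(cmd);
//
// parseDelimitedFrom() pairs with writeDelimitedTo() for length-prefixed streams; the
// ExtensionRegistryLite variants only matter for schemas that declare extensions, which
// this message does not appear to do.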
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
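// Builder sketch (illustrative): built messages are immutable, so "modifying" one means
// copying it into a Builder, changing fields, and building a new instance;
// newBuilder(prototype) is equivalent to prototype.toBuilder():
//
//   DatanodeCommandProto updated = cmd.toBuilder()
//       .clearBlkIdCmd()               // drop a sub-command carried by the original
//       .setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
//       .build();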
/**
* Protobuf type {@code hadoop.hdfs.datanode.DatanodeCommandProto}
*
*
**
* Commands sent from namenode to the datanodes
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_DatanodeCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_DatanodeCommandProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getBalancerCmdFieldBuilder();
getBlkCmdFieldBuilder();
getRecoveryCmdFieldBuilder();
getFinalizeCmdFieldBuilder();
getKeyUpdateCmdFieldBuilder();
getRegisterCmdFieldBuilder();
getBlkIdCmdFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
cmdType_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.BalancerBandwidthCommand;
bitField0_ = (bitField0_ & ~0x00000001);
if (balancerCmdBuilder_ == null) {
balancerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance();
} else {
balancerCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
if (blkCmdBuilder_ == null) {
blkCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance();
} else {
blkCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
if (recoveryCmdBuilder_ == null) {
recoveryCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance();
} else {
recoveryCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
if (finalizeCmdBuilder_ == null) {
finalizeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance();
} else {
finalizeCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000010);
if (keyUpdateCmdBuilder_ == null) {
keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
} else {
keyUpdateCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000020);
if (registerCmdBuilder_ == null) {
registerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance();
} else {
registerCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000040);
if (blkIdCmdBuilder_ == null) {
blkIdCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance();
} else {
blkIdCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000080);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_DatanodeCommandProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.cmdType_ = cmdType_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
if (balancerCmdBuilder_ == null) {
result.balancerCmd_ = balancerCmd_;
} else {
result.balancerCmd_ = balancerCmdBuilder_.build();
}
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
if (blkCmdBuilder_ == null) {
result.blkCmd_ = blkCmd_;
} else {
result.blkCmd_ = blkCmdBuilder_.build();
}
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
if (recoveryCmdBuilder_ == null) {
result.recoveryCmd_ = recoveryCmd_;
} else {
result.recoveryCmd_ = recoveryCmdBuilder_.build();
}
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
if (finalizeCmdBuilder_ == null) {
result.finalizeCmd_ = finalizeCmd_;
} else {
result.finalizeCmd_ = finalizeCmdBuilder_.build();
}
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000020;
}
if (keyUpdateCmdBuilder_ == null) {
result.keyUpdateCmd_ = keyUpdateCmd_;
} else {
result.keyUpdateCmd_ = keyUpdateCmdBuilder_.build();
}
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000040;
}
if (registerCmdBuilder_ == null) {
result.registerCmd_ = registerCmd_;
} else {
result.registerCmd_ = registerCmdBuilder_.build();
}
if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
to_bitField0_ |= 0x00000080;
}
if (blkIdCmdBuilder_ == null) {
result.blkIdCmd_ = blkIdCmd_;
} else {
result.blkIdCmd_ = blkIdCmdBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance()) return this;
if (other.hasCmdType()) {
setCmdType(other.getCmdType());
}
if (other.hasBalancerCmd()) {
mergeBalancerCmd(other.getBalancerCmd());
}
if (other.hasBlkCmd()) {
mergeBlkCmd(other.getBlkCmd());
}
if (other.hasRecoveryCmd()) {
mergeRecoveryCmd(other.getRecoveryCmd());
}
if (other.hasFinalizeCmd()) {
mergeFinalizeCmd(other.getFinalizeCmd());
}
if (other.hasKeyUpdateCmd()) {
mergeKeyUpdateCmd(other.getKeyUpdateCmd());
}
if (other.hasRegisterCmd()) {
mergeRegisterCmd(other.getRegisterCmd());
}
if (other.hasBlkIdCmd()) {
mergeBlkIdCmd(other.getBlkIdCmd());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
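// Merge sketch (illustrative): mergeFrom() copies only the fields that are set on "other";
// a set enum overwrites, while a set sub-message is merged field by field into any existing
// value (see the merge*Cmd methods below). Given two previously built commands base and
// overlay:
//
//   DatanodeCommandProto merged = DatanodeCommandProto.newBuilder(base)
//       .mergeFrom(overlay)
//       .build();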
public final boolean isInitialized() {
if (!hasCmdType()) {
return false;
}
if (hasBalancerCmd()) {
if (!getBalancerCmd().isInitialized()) {
return false;
}
}
if (hasBlkCmd()) {
if (!getBlkCmd().isInitialized()) {
return false;
}
}
if (hasRecoveryCmd()) {
if (!getRecoveryCmd().isInitialized()) {
return false;
}
}
if (hasFinalizeCmd()) {
if (!getFinalizeCmd().isInitialized()) {
return false;
}
}
if (hasKeyUpdateCmd()) {
if (!getKeyUpdateCmd().isInitialized()) {
return false;
}
}
if (hasBlkIdCmd()) {
if (!getBlkIdCmd().isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.datanode.DatanodeCommandProto.Type cmdType = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type cmdType_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.BalancerBandwidthCommand;
/**
* required .hadoop.hdfs.datanode.DatanodeCommandProto.Type cmdType = 1;
*
*
* Type of the command
*
*/
public boolean hasCmdType() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.datanode.DatanodeCommandProto.Type cmdType = 1;
*
*
* Type of the command
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type getCmdType() {
return cmdType_;
}
/**
* required .hadoop.hdfs.datanode.DatanodeCommandProto.Type cmdType = 1;
*
*
* Type of the command
*
*/
public Builder setCmdType(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
cmdType_ = value;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeCommandProto.Type cmdType = 1;
*
*
* Type of the command
*
*/
public Builder clearCmdType() {
bitField0_ = (bitField0_ & ~0x00000001);
cmdType_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.BalancerBandwidthCommand;
onChanged();
return this;
}
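// Presence sketch (illustrative): bitField0_ records which fields have been assigned, so
// has*() reflects explicit assignment rather than comparison against the default value:
//
//   DatanodeCommandProto.Builder b = DatanodeCommandProto.newBuilder();
//   b.hasCmdType();                                                // false
//   b.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand);
//   b.hasCmdType();                                                // true, although the value equals the default
//   b.clearCmdType();                                              // clears the bit and restores the default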
// optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto balancerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder> balancerCmdBuilder_;
/**
* optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;
*
*
* One of the following commands is available when the corresponding
* cmdType is set
*
*/
public boolean hasBalancerCmd() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;
*
*
* One of the following commands is available when the corresponding
* cmdType is set
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto getBalancerCmd() {
if (balancerCmdBuilder_ == null) {
return balancerCmd_;
} else {
return balancerCmdBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;
*
*
* One of the following commands is available when the corresponding
* cmdType is set
*
*/
public Builder setBalancerCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto value) {
if (balancerCmdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
balancerCmd_ = value;
onChanged();
} else {
balancerCmdBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;
*
*
* One of the following commands is available when the corresponding
* cmdType is set
*
*/
public Builder setBalancerCmd(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder builderForValue) {
if (balancerCmdBuilder_ == null) {
balancerCmd_ = builderForValue.build();
onChanged();
} else {
balancerCmdBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
return this;
}
/**
* optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;
*
*
* One of the following commands is available when the corresponding
* cmdType is set
*
*/
public Builder mergeBalancerCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto value) {
if (balancerCmdBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002) &&
balancerCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance()) {
balancerCmd_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.newBuilder(balancerCmd_).mergeFrom(value).buildPartial();
} else {
balancerCmd_ = value;
}
onChanged();
} else {
balancerCmdBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;
*
*
* One of the following commands is available when the corresponding
* cmdType is set
*
*/
public Builder clearBalancerCmd() {
if (balancerCmdBuilder_ == null) {
balancerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance();
onChanged();
} else {
balancerCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
/**
* optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;
*
*
* One of the following commands is available when the corresponding
* cmdType is set
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder getBalancerCmdBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getBalancerCmdFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;
*
*
* One of the following commands is available when the corresponding
* cmdType is set
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder getBalancerCmdOrBuilder() {
if (balancerCmdBuilder_ != null) {
return balancerCmdBuilder_.getMessageOrBuilder();
} else {
return balancerCmd_;
}
}
/**
* optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;
*
*
* One of the following commands is available when the corresponding
* cmdType is set
*
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder>
getBalancerCmdFieldBuilder() {
if (balancerCmdBuilder_ == null) {
balancerCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder>(
balancerCmd_,
getParentForChildren(),
isClean());
balancerCmd_ = null;
}
return balancerCmdBuilder_;
}
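// Nested-builder sketch (illustrative): the SingleFieldBuilder above is created lazily;
// until it exists, the field is held as a plain message in balancerCmd_. Editing the
// sub-message in place looks like the following (setBandwidth(long) is the setter protoc
// generates for the required uint64 bandwidth field):
//
//   DatanodeCommandProto.Builder b = DatanodeCommandProto.newBuilder()
//       .setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand);
//   b.getBalancerCmdBuilder().setBandwidth(10L * 1024 * 1024);
//   DatanodeCommandProto cmd = b.build();
//
// The same lazy pattern repeats for each optional sub-command field below.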
// optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto blkCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder> blkCmdBuilder_;
/**
* optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;
*/
public boolean hasBlkCmd() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto getBlkCmd() {
if (blkCmdBuilder_ == null) {
return blkCmd_;
} else {
return blkCmdBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;
*/
public Builder setBlkCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto value) {
if (blkCmdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
blkCmd_ = value;
onChanged();
} else {
blkCmdBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;
*/
public Builder setBlkCmd(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder builderForValue) {
if (blkCmdBuilder_ == null) {
blkCmd_ = builderForValue.build();
onChanged();
} else {
blkCmdBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;
*/
public Builder mergeBlkCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto value) {
if (blkCmdBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004) &&
blkCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance()) {
blkCmd_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.newBuilder(blkCmd_).mergeFrom(value).buildPartial();
} else {
blkCmd_ = value;
}
onChanged();
} else {
blkCmdBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;
*/
public Builder clearBlkCmd() {
if (blkCmdBuilder_ == null) {
blkCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance();
onChanged();
} else {
blkCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
/**
* optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder getBlkCmdBuilder() {
bitField0_ |= 0x00000004;
onChanged();
return getBlkCmdFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder getBlkCmdOrBuilder() {
if (blkCmdBuilder_ != null) {
return blkCmdBuilder_.getMessageOrBuilder();
} else {
return blkCmd_;
}
}
/**
* optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder>
getBlkCmdFieldBuilder() {
if (blkCmdBuilder_ == null) {
blkCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder>(
blkCmd_,
getParentForChildren(),
isClean());
blkCmd_ = null;
}
return blkCmdBuilder_;
}
// optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto recoveryCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProtoOrBuilder> recoveryCmdBuilder_;
/**
* optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;
*/
public boolean hasRecoveryCmd() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto getRecoveryCmd() {
if (recoveryCmdBuilder_ == null) {
return recoveryCmd_;
} else {
return recoveryCmdBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;
*/
public Builder setRecoveryCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto value) {
if (recoveryCmdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
recoveryCmd_ = value;
onChanged();
} else {
recoveryCmdBuilder_.setMessage(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;
*/
public Builder setRecoveryCmd(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder builderForValue) {
if (recoveryCmdBuilder_ == null) {
recoveryCmd_ = builderForValue.build();
onChanged();
} else {
recoveryCmdBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;
*/
public Builder mergeRecoveryCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto value) {
if (recoveryCmdBuilder_ == null) {
if (((bitField0_ & 0x00000008) == 0x00000008) &&
recoveryCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance()) {
recoveryCmd_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.newBuilder(recoveryCmd_).mergeFrom(value).buildPartial();
} else {
recoveryCmd_ = value;
}
onChanged();
} else {
recoveryCmdBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;
*/
public Builder clearRecoveryCmd() {
if (recoveryCmdBuilder_ == null) {
recoveryCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance();
onChanged();
} else {
recoveryCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
/**
* optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder getRecoveryCmdBuilder() {
bitField0_ |= 0x00000008;
onChanged();
return getRecoveryCmdFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProtoOrBuilder getRecoveryCmdOrBuilder() {
if (recoveryCmdBuilder_ != null) {
return recoveryCmdBuilder_.getMessageOrBuilder();
} else {
return recoveryCmd_;
}
}
/**
* optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProtoOrBuilder>
getRecoveryCmdFieldBuilder() {
if (recoveryCmdBuilder_ == null) {
recoveryCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProtoOrBuilder>(
recoveryCmd_,
getParentForChildren(),
isClean());
recoveryCmd_ = null;
}
return recoveryCmdBuilder_;
}
// optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto finalizeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder> finalizeCmdBuilder_;
/**
* optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;
*/
public boolean hasFinalizeCmd() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto getFinalizeCmd() {
if (finalizeCmdBuilder_ == null) {
return finalizeCmd_;
} else {
return finalizeCmdBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;
*/
public Builder setFinalizeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto value) {
if (finalizeCmdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
finalizeCmd_ = value;
onChanged();
} else {
finalizeCmdBuilder_.setMessage(value);
}
bitField0_ |= 0x00000010;
return this;
}
/**
* optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;
*/
public Builder setFinalizeCmd(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder builderForValue) {
if (finalizeCmdBuilder_ == null) {
finalizeCmd_ = builderForValue.build();
onChanged();
} else {
finalizeCmdBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000010;
return this;
}
/**
* optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;
*/
public Builder mergeFinalizeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto value) {
if (finalizeCmdBuilder_ == null) {
if (((bitField0_ & 0x00000010) == 0x00000010) &&
finalizeCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance()) {
finalizeCmd_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.newBuilder(finalizeCmd_).mergeFrom(value).buildPartial();
} else {
finalizeCmd_ = value;
}
onChanged();
} else {
finalizeCmdBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000010;
return this;
}
/**
* optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;
*/
public Builder clearFinalizeCmd() {
if (finalizeCmdBuilder_ == null) {
finalizeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance();
onChanged();
} else {
finalizeCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
/**
* optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder getFinalizeCmdBuilder() {
bitField0_ |= 0x00000010;
onChanged();
return getFinalizeCmdFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder getFinalizeCmdOrBuilder() {
if (finalizeCmdBuilder_ != null) {
return finalizeCmdBuilder_.getMessageOrBuilder();
} else {
return finalizeCmd_;
}
}
/**
* optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder>
getFinalizeCmdFieldBuilder() {
if (finalizeCmdBuilder_ == null) {
finalizeCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder>(
finalizeCmd_,
getParentForChildren(),
isClean());
finalizeCmd_ = null;
}
return finalizeCmdBuilder_;
}
// optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder> keyUpdateCmdBuilder_;
/**
* optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;
*/
public boolean hasKeyUpdateCmd() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto getKeyUpdateCmd() {
if (keyUpdateCmdBuilder_ == null) {
return keyUpdateCmd_;
} else {
return keyUpdateCmdBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;
*/
public Builder setKeyUpdateCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto value) {
if (keyUpdateCmdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
keyUpdateCmd_ = value;
onChanged();
} else {
keyUpdateCmdBuilder_.setMessage(value);
}
bitField0_ |= 0x00000020;
return this;
}
/**
* optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;
*/
public Builder setKeyUpdateCmd(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder builderForValue) {
if (keyUpdateCmdBuilder_ == null) {
keyUpdateCmd_ = builderForValue.build();
onChanged();
} else {
keyUpdateCmdBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000020;
return this;
}
/**
* optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;
*/
public Builder mergeKeyUpdateCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto value) {
if (keyUpdateCmdBuilder_ == null) {
if (((bitField0_ & 0x00000020) == 0x00000020) &&
keyUpdateCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance()) {
keyUpdateCmd_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.newBuilder(keyUpdateCmd_).mergeFrom(value).buildPartial();
} else {
keyUpdateCmd_ = value;
}
onChanged();
} else {
keyUpdateCmdBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000020;
return this;
}
/**
* optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;
*/
public Builder clearKeyUpdateCmd() {
if (keyUpdateCmdBuilder_ == null) {
keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
onChanged();
} else {
keyUpdateCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000020);
return this;
}
/**
* optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder getKeyUpdateCmdBuilder() {
bitField0_ |= 0x00000020;
onChanged();
return getKeyUpdateCmdFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder getKeyUpdateCmdOrBuilder() {
if (keyUpdateCmdBuilder_ != null) {
return keyUpdateCmdBuilder_.getMessageOrBuilder();
} else {
return keyUpdateCmd_;
}
}
/**
* optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder>
getKeyUpdateCmdFieldBuilder() {
if (keyUpdateCmdBuilder_ == null) {
keyUpdateCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder>(
keyUpdateCmd_,
getParentForChildren(),
isClean());
keyUpdateCmd_ = null;
}
return keyUpdateCmdBuilder_;
}
// optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto registerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder> registerCmdBuilder_;
/**
* optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;
*/
public boolean hasRegisterCmd() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto getRegisterCmd() {
if (registerCmdBuilder_ == null) {
return registerCmd_;
} else {
return registerCmdBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;
*/
public Builder setRegisterCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto value) {
if (registerCmdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
registerCmd_ = value;
onChanged();
} else {
registerCmdBuilder_.setMessage(value);
}
bitField0_ |= 0x00000040;
return this;
}
/**
* optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;
*/
public Builder setRegisterCmd(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder builderForValue) {
if (registerCmdBuilder_ == null) {
registerCmd_ = builderForValue.build();
onChanged();
} else {
registerCmdBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000040;
return this;
}
/**
* optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;
*/
public Builder mergeRegisterCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto value) {
if (registerCmdBuilder_ == null) {
if (((bitField0_ & 0x00000040) == 0x00000040) &&
registerCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance()) {
registerCmd_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.newBuilder(registerCmd_).mergeFrom(value).buildPartial();
} else {
registerCmd_ = value;
}
onChanged();
} else {
registerCmdBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000040;
return this;
}
/**
* optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;
*/
public Builder clearRegisterCmd() {
if (registerCmdBuilder_ == null) {
registerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance();
onChanged();
} else {
registerCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000040);
return this;
}
/**
* optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder getRegisterCmdBuilder() {
bitField0_ |= 0x00000040;
onChanged();
return getRegisterCmdFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder getRegisterCmdOrBuilder() {
if (registerCmdBuilder_ != null) {
return registerCmdBuilder_.getMessageOrBuilder();
} else {
return registerCmd_;
}
}
/**
* optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder>
getRegisterCmdFieldBuilder() {
if (registerCmdBuilder_ == null) {
registerCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder>(
registerCmd_,
getParentForChildren(),
isClean());
registerCmd_ = null;
}
return registerCmdBuilder_;
}
// optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto blkIdCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProtoOrBuilder> blkIdCmdBuilder_;
/**
* optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;
*/
public boolean hasBlkIdCmd() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto getBlkIdCmd() {
if (blkIdCmdBuilder_ == null) {
return blkIdCmd_;
} else {
return blkIdCmdBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;
*/
public Builder setBlkIdCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto value) {
if (blkIdCmdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
blkIdCmd_ = value;
onChanged();
} else {
blkIdCmdBuilder_.setMessage(value);
}
bitField0_ |= 0x00000080;
return this;
}
/**
* optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;
*/
public Builder setBlkIdCmd(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Builder builderForValue) {
if (blkIdCmdBuilder_ == null) {
blkIdCmd_ = builderForValue.build();
onChanged();
} else {
blkIdCmdBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000080;
return this;
}
/**
* optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;
*/
public Builder mergeBlkIdCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto value) {
if (blkIdCmdBuilder_ == null) {
if (((bitField0_ & 0x00000080) == 0x00000080) &&
blkIdCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance()) {
blkIdCmd_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.newBuilder(blkIdCmd_).mergeFrom(value).buildPartial();
} else {
blkIdCmd_ = value;
}
onChanged();
} else {
blkIdCmdBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000080;
return this;
}
/**
* optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;
*/
public Builder clearBlkIdCmd() {
if (blkIdCmdBuilder_ == null) {
blkIdCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance();
onChanged();
} else {
blkIdCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000080);
return this;
}
/**
* optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Builder getBlkIdCmdBuilder() {
bitField0_ |= 0x00000080;
onChanged();
return getBlkIdCmdFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProtoOrBuilder getBlkIdCmdOrBuilder() {
if (blkIdCmdBuilder_ != null) {
return blkIdCmdBuilder_.getMessageOrBuilder();
} else {
return blkIdCmd_;
}
}
/**
* optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProtoOrBuilder>
getBlkIdCmdFieldBuilder() {
if (blkIdCmdBuilder_ == null) {
blkIdCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProtoOrBuilder>(
blkIdCmd_,
getParentForChildren(),
isClean());
blkIdCmd_ = null;
}
return blkIdCmdBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.DatanodeCommandProto)
}
static {
defaultInstance = new DatanodeCommandProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.DatanodeCommandProto)
}
public interface BalancerBandwidthCommandProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required uint64 bandwidth = 1;
/**
* required uint64 bandwidth = 1;
*
*
* Maximum bandwidth to be used by datanode for balancing
*
*/
boolean hasBandwidth();
/**
* required uint64 bandwidth = 1;
*
*
* Maximum bandwidth to be used by datanode for balancing
*
*/
long getBandwidth();
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.BalancerBandwidthCommandProto}
*
*
**
* Command sent from namenode to datanode to set the
* maximum bandwidth to be used for balancing.
*
*/
public static final class BalancerBandwidthCommandProto extends
com.google.protobuf.GeneratedMessage
implements BalancerBandwidthCommandProtoOrBuilder {
// Use BalancerBandwidthCommandProto.newBuilder() to construct.
private BalancerBandwidthCommandProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private BalancerBandwidthCommandProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final BalancerBandwidthCommandProto defaultInstance;
public static BalancerBandwidthCommandProto getDefaultInstance() {
return defaultInstance;
}
public BalancerBandwidthCommandProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private BalancerBandwidthCommandProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
bandwidth_ = input.readUInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BalancerBandwidthCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BalancerBandwidthCommandProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder.class);
}
public static com.google.protobuf.Parser<BalancerBandwidthCommandProto> PARSER =
new com.google.protobuf.AbstractParser<BalancerBandwidthCommandProto>() {
public BalancerBandwidthCommandProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new BalancerBandwidthCommandProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<BalancerBandwidthCommandProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required uint64 bandwidth = 1;
public static final int BANDWIDTH_FIELD_NUMBER = 1;
private long bandwidth_;
/**
* required uint64 bandwidth = 1;
*
*
* Maximum bandwidth to be used by datanode for balancing
*
*/
public boolean hasBandwidth() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint64 bandwidth = 1;
*
*
* Maximum bandwidth to be used by datanode for balancing
*
*/
public long getBandwidth() {
return bandwidth_;
}
private void initFields() {
bandwidth_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasBandwidth()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt64(1, bandwidth_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(1, bandwidth_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto) obj;
boolean result = true;
result = result && (hasBandwidth() == other.hasBandwidth());
if (hasBandwidth()) {
result = result && (getBandwidth()
== other.getBandwidth());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasBandwidth()) {
hash = (37 * hash) + BANDWIDTH_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getBandwidth());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.BalancerBandwidthCommandProto}
*
*
**
* Command sent from namenode to datanode to set the
* maximum bandwidth to be used for balancing.
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BalancerBandwidthCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BalancerBandwidthCommandProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
bandwidth_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BalancerBandwidthCommandProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.bandwidth_ = bandwidth_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance()) return this;
if (other.hasBandwidth()) {
setBandwidth(other.getBandwidth());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasBandwidth()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required uint64 bandwidth = 1;
private long bandwidth_ ;
/**
* required uint64 bandwidth = 1;
*
*
* Maximum bandwidth to be used by datanode for balancing
*
*/
public boolean hasBandwidth() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint64 bandwidth = 1;
*
*
* Maximum bandwidth to be used by datanode for balancing
*
*/
public long getBandwidth() {
return bandwidth_;
}
/**
* required uint64 bandwidth = 1;
*
*
* Maximum bandwidth to be used by datanode for balancing
*
*/
public Builder setBandwidth(long value) {
bitField0_ |= 0x00000001;
bandwidth_ = value;
onChanged();
return this;
}
/**
* required uint64 bandwidth = 1;
*
*
* Maximum bandwidth to be used by datanode for balancing
*
*/
public Builder clearBandwidth() {
bitField0_ = (bitField0_ & ~0x00000001);
bandwidth_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.BalancerBandwidthCommandProto)
}
static {
defaultInstance = new BalancerBandwidthCommandProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.BalancerBandwidthCommandProto)
}
public interface BlockCommandProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.datanode.BlockCommandProto.Action action = 1;
/**
* required .hadoop.hdfs.datanode.BlockCommandProto.Action action = 1;
*/
boolean hasAction();
/**
* required .hadoop.hdfs.datanode.BlockCommandProto.Action action = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action getAction();
// required string blockPoolId = 2;
/**
* required string blockPoolId = 2;
*/
boolean hasBlockPoolId();
/**
* required string blockPoolId = 2;
*/
java.lang.String getBlockPoolId();
/**
* required string blockPoolId = 2;
*/
com.google.protobuf.ByteString
getBlockPoolIdBytes();
// repeated .hadoop.hdfs.BlockProto blocks = 3;
/**
* repeated .hadoop.hdfs.BlockProto blocks = 3;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto>
getBlocksList();
/**
* repeated .hadoop.hdfs.BlockProto blocks = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index);
/**
* repeated .hadoop.hdfs.BlockProto blocks = 3;
*/
int getBlocksCount();
/**
* repeated .hadoop.hdfs.BlockProto blocks = 3;
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>
getBlocksOrBuilderList();
/**
* repeated .hadoop.hdfs.BlockProto blocks = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder(
int index);
// repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
/**
* repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto>
getTargetsList();
/**
* repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getTargets(int index);
/**
* repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
*/
int getTargetsCount();
/**
* repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder>
getTargetsOrBuilderList();
/**
* repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder getTargetsOrBuilder(
int index);
// repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
/**
* repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto>
getTargetStorageUuidsList();
/**
* repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto getTargetStorageUuids(int index);
/**
* repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
*/
int getTargetStorageUuidsCount();
/**
* repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProtoOrBuilder>
getTargetStorageUuidsOrBuilderList();
/**
* repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProtoOrBuilder getTargetStorageUuidsOrBuilder(
int index);
// repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
/**
* repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto>
getTargetStorageTypesList();
/**
* repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getTargetStorageTypes(int index);
/**
* repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
*/
int getTargetStorageTypesCount();
/**
* repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>
getTargetStorageTypesOrBuilderList();
/**
* repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getTargetStorageTypesOrBuilder(
int index);
}
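// Illustrative usage sketch (commentary added for this listing; not emitted by protoc):
// the accessors declared in this interface mirror the Builder methods further below,
// so a namenode-side caller could assemble an INVALIDATE command roughly like this.
// The block-pool id and the BlockProto field names (blockId, genStamp, numBytes) are
// assumptions taken from hdfs.proto, not from this file.
//
//   BlockCommandProto blkCmd = BlockCommandProto.newBuilder()
//       .setAction(BlockCommandProto.Action.INVALIDATE)
//       .setBlockPoolId("BP-example-1")
//       .addBlocks(HdfsProtos.BlockProto.newBuilder()
//           .setBlockId(1L).setGenStamp(1L).setNumBytes(0L).build())
//       .build();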
/**
* Protobuf type {@code hadoop.hdfs.datanode.BlockCommandProto}
*
*
**
* Command to instruct datanodes to perform certain action
* on the given set of blocks.
*
*/
public static final class BlockCommandProto extends
com.google.protobuf.GeneratedMessage
implements BlockCommandProtoOrBuilder {
// Use BlockCommandProto.newBuilder() to construct.
private BlockCommandProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private BlockCommandProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final BlockCommandProto defaultInstance;
public static BlockCommandProto getDefaultInstance() {
return defaultInstance;
}
public BlockCommandProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private BlockCommandProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action value = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
action_ = value;
}
break;
}
case 18: {
bitField0_ |= 0x00000002;
blockPoolId_ = input.readBytes();
break;
}
case 26: {
if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto>();
mutable_bitField0_ |= 0x00000004;
}
blocks_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.PARSER, extensionRegistry));
break;
}
case 34: {
if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
targets_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto>();
mutable_bitField0_ |= 0x00000008;
}
targets_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.PARSER, extensionRegistry));
break;
}
case 42: {
if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
targetStorageUuids_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto>();
mutable_bitField0_ |= 0x00000010;
}
targetStorageUuids_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto.PARSER, extensionRegistry));
break;
}
case 50: {
if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
targetStorageTypes_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto>();
mutable_bitField0_ |= 0x00000020;
}
targetStorageTypes_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.PARSER, extensionRegistry));
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
blocks_ = java.util.Collections.unmodifiableList(blocks_);
}
if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
targets_ = java.util.Collections.unmodifiableList(targets_);
}
if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
targetStorageUuids_ = java.util.Collections.unmodifiableList(targetStorageUuids_);
}
if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
targetStorageTypes_ = java.util.Collections.unmodifiableList(targetStorageTypes_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockCommandProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder.class);
}
public static com.google.protobuf.Parser<BlockCommandProto> PARSER =
new com.google.protobuf.AbstractParser<BlockCommandProto>() {
public BlockCommandProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new BlockCommandProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<BlockCommandProto> getParserForType() {
return PARSER;
}
/**
* Protobuf enum {@code hadoop.hdfs.datanode.BlockCommandProto.Action}
*/
public enum Action
implements com.google.protobuf.ProtocolMessageEnum {
/**
* TRANSFER = 1;
*
*
* Transfer blocks to another datanode
*
*/
TRANSFER(0, 1),
/**
* INVALIDATE = 2;
*
*
* Invalidate blocks
*
*/
INVALIDATE(1, 2),
/**
* SHUTDOWN = 3;
*
*
* Shutdown the datanode
*
*/
SHUTDOWN(2, 3),
;
/**
* TRANSFER = 1;
*
*
* Transfer blocks to another datanode
*
*/
public static final int TRANSFER_VALUE = 1;
/**
* INVALIDATE = 2;
*
*
* Invalidate blocks
*
*/
public static final int INVALIDATE_VALUE = 2;
/**
* SHUTDOWN = 3;
*
*
* Shutdown the datanode
*
*/
public static final int SHUTDOWN_VALUE = 3;
public final int getNumber() { return value; }
public static Action valueOf(int value) {
switch (value) {
case 1: return TRANSFER;
case 2: return INVALIDATE;
case 3: return SHUTDOWN;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<Action>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<Action>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<Action>() {
public Action findValueByNumber(int number) {
return Action.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDescriptor().getEnumTypes().get(0);
}
private static final Action[] VALUES = values();
public static Action valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private Action(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.datanode.BlockCommandProto.Action)
}
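// Note (commentary added for this listing; not emitted by protoc): the two-argument
// constructor above stores both the descriptor index and the wire number declared in
// DatanodeProtocol.proto, so Action.TRANSFER reports getNumber() == 1 while its
// descriptor index is 0. Round-tripping by wire number:
//
//   int wireValue = BlockCommandProto.Action.INVALIDATE.getNumber(); // 2
//   BlockCommandProto.Action decoded = BlockCommandProto.Action.valueOf(wireValue);
//   assert decoded == BlockCommandProto.Action.INVALIDATE;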
private int bitField0_;
// required .hadoop.hdfs.datanode.BlockCommandProto.Action action = 1;
public static final int ACTION_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action action_;
/**
* required .hadoop.hdfs.datanode.BlockCommandProto.Action action = 1;
*/
public boolean hasAction() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.datanode.BlockCommandProto.Action action = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action getAction() {
return action_;
}
// required string blockPoolId = 2;
public static final int BLOCKPOOLID_FIELD_NUMBER = 2;
private java.lang.Object blockPoolId_;
/**
* required string blockPoolId = 2;
*/
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string blockPoolId = 2;
*/
public java.lang.String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
blockPoolId_ = s;
}
return s;
}
}
/**
* required string blockPoolId = 2;
*/
public com.google.protobuf.ByteString
getBlockPoolIdBytes() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPoolId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// repeated .hadoop.hdfs.BlockProto blocks = 3;
public static final int BLOCKS_FIELD_NUMBER = 3;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> blocks_;
/**
* repeated .hadoop.hdfs.BlockProto blocks = 3;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> getBlocksList() {
return blocks_;
}
/**
* repeated .hadoop.hdfs.BlockProto blocks = 3;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>
getBlocksOrBuilderList() {
return blocks_;
}
/**
* repeated .hadoop.hdfs.BlockProto blocks = 3;
*/
public int getBlocksCount() {
return blocks_.size();
}
/**
* repeated .hadoop.hdfs.BlockProto blocks = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index) {
return blocks_.get(index);
}
/**
* repeated .hadoop.hdfs.BlockProto blocks = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder(
int index) {
return blocks_.get(index);
}
// repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
public static final int TARGETS_FIELD_NUMBER = 4;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto> targets_;
/**
* repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto> getTargetsList() {
return targets_;
}
/**
* repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder>
getTargetsOrBuilderList() {
return targets_;
}
/**
* repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
*/
public int getTargetsCount() {
return targets_.size();
}
/**
* repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getTargets(int index) {
return targets_.get(index);
}
/**
* repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder getTargetsOrBuilder(
int index) {
return targets_.get(index);
}
// repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
public static final int TARGETSTORAGEUUIDS_FIELD_NUMBER = 5;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto> targetStorageUuids_;
/**
* repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto> getTargetStorageUuidsList() {
return targetStorageUuids_;
}
/**
* repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProtoOrBuilder>
getTargetStorageUuidsOrBuilderList() {
return targetStorageUuids_;
}
/**
* repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
*/
public int getTargetStorageUuidsCount() {
return targetStorageUuids_.size();
}
/**
* repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto getTargetStorageUuids(int index) {
return targetStorageUuids_.get(index);
}
/**
* repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProtoOrBuilder getTargetStorageUuidsOrBuilder(
int index) {
return targetStorageUuids_.get(index);
}
// repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
public static final int TARGETSTORAGETYPES_FIELD_NUMBER = 6;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto> targetStorageTypes_;
/**
* repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto> getTargetStorageTypesList() {
return targetStorageTypes_;
}
/**
* repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>
getTargetStorageTypesOrBuilderList() {
return targetStorageTypes_;
}
/**
* repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
*/
public int getTargetStorageTypesCount() {
return targetStorageTypes_.size();
}
/**
* repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getTargetStorageTypes(int index) {
return targetStorageTypes_.get(index);
}
/**
* repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getTargetStorageTypesOrBuilder(
int index) {
return targetStorageTypes_.get(index);
}
private void initFields() {
action_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action.TRANSFER;
blockPoolId_ = "";
blocks_ = java.util.Collections.emptyList();
targets_ = java.util.Collections.emptyList();
targetStorageUuids_ = java.util.Collections.emptyList();
targetStorageTypes_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasAction()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasBlockPoolId()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getBlocksCount(); i++) {
if (!getBlocks(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
for (int i = 0; i < getTargetsCount(); i++) {
if (!getTargets(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, action_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getBlockPoolIdBytes());
}
for (int i = 0; i < blocks_.size(); i++) {
output.writeMessage(3, blocks_.get(i));
}
for (int i = 0; i < targets_.size(); i++) {
output.writeMessage(4, targets_.get(i));
}
for (int i = 0; i < targetStorageUuids_.size(); i++) {
output.writeMessage(5, targetStorageUuids_.get(i));
}
for (int i = 0; i < targetStorageTypes_.size(); i++) {
output.writeMessage(6, targetStorageTypes_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, action_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getBlockPoolIdBytes());
}
for (int i = 0; i < blocks_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, blocks_.get(i));
}
for (int i = 0; i < targets_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(4, targets_.get(i));
}
for (int i = 0; i < targetStorageUuids_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(5, targetStorageUuids_.get(i));
}
for (int i = 0; i < targetStorageTypes_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(6, targetStorageTypes_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto) obj;
boolean result = true;
result = result && (hasAction() == other.hasAction());
if (hasAction()) {
result = result &&
(getAction() == other.getAction());
}
result = result && (hasBlockPoolId() == other.hasBlockPoolId());
if (hasBlockPoolId()) {
result = result && getBlockPoolId()
.equals(other.getBlockPoolId());
}
result = result && getBlocksList()
.equals(other.getBlocksList());
result = result && getTargetsList()
.equals(other.getTargetsList());
result = result && getTargetStorageUuidsList()
.equals(other.getTargetStorageUuidsList());
result = result && getTargetStorageTypesList()
.equals(other.getTargetStorageTypesList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasAction()) {
hash = (37 * hash) + ACTION_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getAction());
}
if (hasBlockPoolId()) {
hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
hash = (53 * hash) + getBlockPoolId().hashCode();
}
if (getBlocksCount() > 0) {
hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
hash = (53 * hash) + getBlocksList().hashCode();
}
if (getTargetsCount() > 0) {
hash = (37 * hash) + TARGETS_FIELD_NUMBER;
hash = (53 * hash) + getTargetsList().hashCode();
}
if (getTargetStorageUuidsCount() > 0) {
hash = (37 * hash) + TARGETSTORAGEUUIDS_FIELD_NUMBER;
hash = (53 * hash) + getTargetStorageUuidsList().hashCode();
}
if (getTargetStorageTypesCount() > 0) {
hash = (37 * hash) + TARGETSTORAGETYPES_FIELD_NUMBER;
hash = (53 * hash) + getTargetStorageTypesList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.BlockCommandProto}
*
*
**
* Command to instruct datanodes to perform certain action
* on the given set of blocks.
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockCommandProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getBlocksFieldBuilder();
getTargetsFieldBuilder();
getTargetStorageUuidsFieldBuilder();
getTargetStorageTypesFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
action_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action.TRANSFER;
bitField0_ = (bitField0_ & ~0x00000001);
blockPoolId_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
if (blocksBuilder_ == null) {
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
} else {
blocksBuilder_.clear();
}
if (targetsBuilder_ == null) {
targets_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000008);
} else {
targetsBuilder_.clear();
}
if (targetStorageUuidsBuilder_ == null) {
targetStorageUuids_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000010);
} else {
targetStorageUuidsBuilder_.clear();
}
if (targetStorageTypesBuilder_ == null) {
targetStorageTypes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000020);
} else {
targetStorageTypesBuilder_.clear();
}
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockCommandProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.action_ = action_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.blockPoolId_ = blockPoolId_;
if (blocksBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004)) {
blocks_ = java.util.Collections.unmodifiableList(blocks_);
bitField0_ = (bitField0_ & ~0x00000004);
}
result.blocks_ = blocks_;
} else {
result.blocks_ = blocksBuilder_.build();
}
if (targetsBuilder_ == null) {
if (((bitField0_ & 0x00000008) == 0x00000008)) {
targets_ = java.util.Collections.unmodifiableList(targets_);
bitField0_ = (bitField0_ & ~0x00000008);
}
result.targets_ = targets_;
} else {
result.targets_ = targetsBuilder_.build();
}
if (targetStorageUuidsBuilder_ == null) {
if (((bitField0_ & 0x00000010) == 0x00000010)) {
targetStorageUuids_ = java.util.Collections.unmodifiableList(targetStorageUuids_);
bitField0_ = (bitField0_ & ~0x00000010);
}
result.targetStorageUuids_ = targetStorageUuids_;
} else {
result.targetStorageUuids_ = targetStorageUuidsBuilder_.build();
}
if (targetStorageTypesBuilder_ == null) {
if (((bitField0_ & 0x00000020) == 0x00000020)) {
targetStorageTypes_ = java.util.Collections.unmodifiableList(targetStorageTypes_);
bitField0_ = (bitField0_ & ~0x00000020);
}
result.targetStorageTypes_ = targetStorageTypes_;
} else {
result.targetStorageTypes_ = targetStorageTypesBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance()) return this;
if (other.hasAction()) {
setAction(other.getAction());
}
if (other.hasBlockPoolId()) {
bitField0_ |= 0x00000002;
blockPoolId_ = other.blockPoolId_;
onChanged();
}
if (blocksBuilder_ == null) {
if (!other.blocks_.isEmpty()) {
if (blocks_.isEmpty()) {
blocks_ = other.blocks_;
bitField0_ = (bitField0_ & ~0x00000004);
} else {
ensureBlocksIsMutable();
blocks_.addAll(other.blocks_);
}
onChanged();
}
} else {
if (!other.blocks_.isEmpty()) {
if (blocksBuilder_.isEmpty()) {
blocksBuilder_.dispose();
blocksBuilder_ = null;
blocks_ = other.blocks_;
bitField0_ = (bitField0_ & ~0x00000004);
blocksBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getBlocksFieldBuilder() : null;
} else {
blocksBuilder_.addAllMessages(other.blocks_);
}
}
}
if (targetsBuilder_ == null) {
if (!other.targets_.isEmpty()) {
if (targets_.isEmpty()) {
targets_ = other.targets_;
bitField0_ = (bitField0_ & ~0x00000008);
} else {
ensureTargetsIsMutable();
targets_.addAll(other.targets_);
}
onChanged();
}
} else {
if (!other.targets_.isEmpty()) {
if (targetsBuilder_.isEmpty()) {
targetsBuilder_.dispose();
targetsBuilder_ = null;
targets_ = other.targets_;
bitField0_ = (bitField0_ & ~0x00000008);
targetsBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getTargetsFieldBuilder() : null;
} else {
targetsBuilder_.addAllMessages(other.targets_);
}
}
}
if (targetStorageUuidsBuilder_ == null) {
if (!other.targetStorageUuids_.isEmpty()) {
if (targetStorageUuids_.isEmpty()) {
targetStorageUuids_ = other.targetStorageUuids_;
bitField0_ = (bitField0_ & ~0x00000010);
} else {
ensureTargetStorageUuidsIsMutable();
targetStorageUuids_.addAll(other.targetStorageUuids_);
}
onChanged();
}
} else {
if (!other.targetStorageUuids_.isEmpty()) {
if (targetStorageUuidsBuilder_.isEmpty()) {
targetStorageUuidsBuilder_.dispose();
targetStorageUuidsBuilder_ = null;
targetStorageUuids_ = other.targetStorageUuids_;
bitField0_ = (bitField0_ & ~0x00000010);
targetStorageUuidsBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getTargetStorageUuidsFieldBuilder() : null;
} else {
targetStorageUuidsBuilder_.addAllMessages(other.targetStorageUuids_);
}
}
}
if (targetStorageTypesBuilder_ == null) {
if (!other.targetStorageTypes_.isEmpty()) {
if (targetStorageTypes_.isEmpty()) {
targetStorageTypes_ = other.targetStorageTypes_;
bitField0_ = (bitField0_ & ~0x00000020);
} else {
ensureTargetStorageTypesIsMutable();
targetStorageTypes_.addAll(other.targetStorageTypes_);
}
onChanged();
}
} else {
if (!other.targetStorageTypes_.isEmpty()) {
if (targetStorageTypesBuilder_.isEmpty()) {
targetStorageTypesBuilder_.dispose();
targetStorageTypesBuilder_ = null;
targetStorageTypes_ = other.targetStorageTypes_;
bitField0_ = (bitField0_ & ~0x00000020);
targetStorageTypesBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getTargetStorageTypesFieldBuilder() : null;
} else {
targetStorageTypesBuilder_.addAllMessages(other.targetStorageTypes_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasAction()) {
return false;
}
if (!hasBlockPoolId()) {
return false;
}
for (int i = 0; i < getBlocksCount(); i++) {
if (!getBlocks(i).isInitialized()) {
return false;
}
}
for (int i = 0; i < getTargetsCount(); i++) {
if (!getTargets(i).isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.datanode.BlockCommandProto.Action action = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action action_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action.TRANSFER;
/**
* required .hadoop.hdfs.datanode.BlockCommandProto.Action action = 1;
*/
public boolean hasAction() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.datanode.BlockCommandProto.Action action = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action getAction() {
return action_;
}
/**
* required .hadoop.hdfs.datanode.BlockCommandProto.Action action = 1;
*/
public Builder setAction(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
action_ = value;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.datanode.BlockCommandProto.Action action = 1;
*/
public Builder clearAction() {
bitField0_ = (bitField0_ & ~0x00000001);
action_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action.TRANSFER;
onChanged();
return this;
}
// required string blockPoolId = 2;
private java.lang.Object blockPoolId_ = "";
/**
* required string blockPoolId = 2;
*/
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string blockPoolId = 2;
*/
public java.lang.String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
blockPoolId_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string blockPoolId = 2;
*/
public com.google.protobuf.ByteString
getBlockPoolIdBytes() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPoolId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string blockPoolId = 2;
*/
public Builder setBlockPoolId(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
blockPoolId_ = value;
onChanged();
return this;
}
/**
* required string blockPoolId = 2;
*/
public Builder clearBlockPoolId() {
bitField0_ = (bitField0_ & ~0x00000002);
blockPoolId_ = getDefaultInstance().getBlockPoolId();
onChanged();
return this;
}
/**
* required string blockPoolId = 2;
*/
public Builder setBlockPoolIdBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
blockPoolId_ = value;
onChanged();
return this;
}
// repeated .hadoop.hdfs.BlockProto blocks = 3;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> blocks_ =
java.util.Collections.emptyList();
private void ensureBlocksIsMutable() {
if (!((bitField0_ & 0x00000004) == 0x00000004)) {
blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto>(blocks_);
bitField0_ |= 0x00000004;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> blocksBuilder_;
/**
* repeated .hadoop.hdfs.BlockProto blocks = 3;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> getBlocksList() {
if (blocksBuilder_ == null) {
return java.util.Collections.unmodifiableList(blocks_);
} else {
return blocksBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.BlockProto blocks = 3;
*/
public int getBlocksCount() {
if (blocksBuilder_ == null) {
return blocks_.size();
} else {
return blocksBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.BlockProto blocks = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index) {
if (blocksBuilder_ == null) {
return blocks_.get(index);
} else {
return blocksBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.BlockProto blocks = 3;
*/
public Builder setBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.set(index, value);
onChanged();
} else {
blocksBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.BlockProto blocks = 3;
*/
public Builder setBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.set(index, builderForValue.build());
onChanged();
} else {
blocksBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.BlockProto blocks = 3;
*/
public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.add(value);
onChanged();
} else {
blocksBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.BlockProto blocks = 3;
*/
public Builder addBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.add(index, value);
onChanged();
} else {
blocksBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.BlockProto blocks = 3;
*/
public Builder addBlocks(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.add(builderForValue.build());
onChanged();
} else {
blocksBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.BlockProto blocks = 3;
*/
public Builder addBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.add(index, builderForValue.build());
onChanged();
} else {
blocksBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.BlockProto blocks = 3;
*/
public Builder addAllBlocks(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> values) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
super.addAll(values, blocks_);
onChanged();
} else {
blocksBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.BlockProto blocks = 3;
*/
public Builder clearBlocks() {
if (blocksBuilder_ == null) {
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
} else {
blocksBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.BlockProto blocks = 3;
*/
public Builder removeBlocks(int index) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.remove(index);
onChanged();
} else {
blocksBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.BlockProto blocks = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getBlocksBuilder(
int index) {
return getBlocksFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.BlockProto blocks = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder(
int index) {
if (blocksBuilder_ == null) {
return blocks_.get(index); } else {
return blocksBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.BlockProto blocks = 3;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>
getBlocksOrBuilderList() {
if (blocksBuilder_ != null) {
return blocksBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(blocks_);
}
}
/**
* repeated .hadoop.hdfs.BlockProto blocks = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder addBlocksBuilder() {
return getBlocksFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.BlockProto blocks = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder addBlocksBuilder(
int index) {
return getBlocksFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.BlockProto blocks = 3;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder>
getBlocksBuilderList() {
return getBlocksFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>
getBlocksFieldBuilder() {
if (blocksBuilder_ == null) {
blocksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>(
blocks_,
((bitField0_ & 0x00000004) == 0x00000004),
getParentForChildren(),
isClean());
blocks_ = null;
}
return blocksBuilder_;
}
// repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto> targets_ =
java.util.Collections.emptyList();
private void ensureTargetsIsMutable() {
if (!((bitField0_ & 0x00000008) == 0x00000008)) {
targets_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto>(targets_);
bitField0_ |= 0x00000008;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder> targetsBuilder_;
/**
* repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto> getTargetsList() {
if (targetsBuilder_ == null) {
return java.util.Collections.unmodifiableList(targets_);
} else {
return targetsBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
*/
public int getTargetsCount() {
if (targetsBuilder_ == null) {
return targets_.size();
} else {
return targetsBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getTargets(int index) {
if (targetsBuilder_ == null) {
return targets_.get(index);
} else {
return targetsBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
*/
public Builder setTargets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto value) {
if (targetsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetsIsMutable();
targets_.set(index, value);
onChanged();
} else {
targetsBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
*/
public Builder setTargets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder builderForValue) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.set(index, builderForValue.build());
onChanged();
} else {
targetsBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
*/
public Builder addTargets(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto value) {
if (targetsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetsIsMutable();
targets_.add(value);
onChanged();
} else {
targetsBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
*/
public Builder addTargets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto value) {
if (targetsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetsIsMutable();
targets_.add(index, value);
onChanged();
} else {
targetsBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
*/
public Builder addTargets(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder builderForValue) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.add(builderForValue.build());
onChanged();
} else {
targetsBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
*/
public Builder addTargets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder builderForValue) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.add(index, builderForValue.build());
onChanged();
} else {
targetsBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
*/
public Builder addAllTargets(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto> values) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
super.addAll(values, targets_);
onChanged();
} else {
targetsBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
*/
public Builder clearTargets() {
if (targetsBuilder_ == null) {
targets_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000008);
onChanged();
} else {
targetsBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
*/
public Builder removeTargets(int index) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.remove(index);
onChanged();
} else {
targetsBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder getTargetsBuilder(
int index) {
return getTargetsFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder getTargetsOrBuilder(
int index) {
if (targetsBuilder_ == null) {
return targets_.get(index); } else {
return targetsBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder>
getTargetsOrBuilderList() {
if (targetsBuilder_ != null) {
return targetsBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(targets_);
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder addTargetsBuilder() {
return getTargetsFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder addTargetsBuilder(
int index) {
return getTargetsFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder>
getTargetsBuilderList() {
return getTargetsFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder>
getTargetsFieldBuilder() {
if (targetsBuilder_ == null) {
targetsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder>(
targets_,
((bitField0_ & 0x00000008) == 0x00000008),
getParentForChildren(),
isClean());
targets_ = null;
}
return targetsBuilder_;
}
// repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto> targetStorageUuids_ =
java.util.Collections.emptyList();
private void ensureTargetStorageUuidsIsMutable() {
if (!((bitField0_ & 0x00000010) == 0x00000010)) {
targetStorageUuids_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto>(targetStorageUuids_);
bitField0_ |= 0x00000010;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProtoOrBuilder> targetStorageUuidsBuilder_;
/**
* repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto> getTargetStorageUuidsList() {
if (targetStorageUuidsBuilder_ == null) {
return java.util.Collections.unmodifiableList(targetStorageUuids_);
} else {
return targetStorageUuidsBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
*/
public int getTargetStorageUuidsCount() {
if (targetStorageUuidsBuilder_ == null) {
return targetStorageUuids_.size();
} else {
return targetStorageUuidsBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto getTargetStorageUuids(int index) {
if (targetStorageUuidsBuilder_ == null) {
return targetStorageUuids_.get(index);
} else {
return targetStorageUuidsBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
*/
public Builder setTargetStorageUuids(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto value) {
if (targetStorageUuidsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetStorageUuidsIsMutable();
targetStorageUuids_.set(index, value);
onChanged();
} else {
targetStorageUuidsBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
*/
public Builder setTargetStorageUuids(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto.Builder builderForValue) {
if (targetStorageUuidsBuilder_ == null) {
ensureTargetStorageUuidsIsMutable();
targetStorageUuids_.set(index, builderForValue.build());
onChanged();
} else {
targetStorageUuidsBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
*/
public Builder addTargetStorageUuids(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto value) {
if (targetStorageUuidsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetStorageUuidsIsMutable();
targetStorageUuids_.add(value);
onChanged();
} else {
targetStorageUuidsBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
*/
public Builder addTargetStorageUuids(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto value) {
if (targetStorageUuidsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetStorageUuidsIsMutable();
targetStorageUuids_.add(index, value);
onChanged();
} else {
targetStorageUuidsBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
*/
public Builder addTargetStorageUuids(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto.Builder builderForValue) {
if (targetStorageUuidsBuilder_ == null) {
ensureTargetStorageUuidsIsMutable();
targetStorageUuids_.add(builderForValue.build());
onChanged();
} else {
targetStorageUuidsBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
*/
public Builder addTargetStorageUuids(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto.Builder builderForValue) {
if (targetStorageUuidsBuilder_ == null) {
ensureTargetStorageUuidsIsMutable();
targetStorageUuids_.add(index, builderForValue.build());
onChanged();
} else {
targetStorageUuidsBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
*/
public Builder addAllTargetStorageUuids(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto> values) {
if (targetStorageUuidsBuilder_ == null) {
ensureTargetStorageUuidsIsMutable();
super.addAll(values, targetStorageUuids_);
onChanged();
} else {
targetStorageUuidsBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
*/
public Builder clearTargetStorageUuids() {
if (targetStorageUuidsBuilder_ == null) {
targetStorageUuids_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000010);
onChanged();
} else {
targetStorageUuidsBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
*/
public Builder removeTargetStorageUuids(int index) {
if (targetStorageUuidsBuilder_ == null) {
ensureTargetStorageUuidsIsMutable();
targetStorageUuids_.remove(index);
onChanged();
} else {
targetStorageUuidsBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto.Builder getTargetStorageUuidsBuilder(
int index) {
return getTargetStorageUuidsFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProtoOrBuilder getTargetStorageUuidsOrBuilder(
int index) {
if (targetStorageUuidsBuilder_ == null) {
return targetStorageUuids_.get(index); } else {
return targetStorageUuidsBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProtoOrBuilder>
getTargetStorageUuidsOrBuilderList() {
if (targetStorageUuidsBuilder_ != null) {
return targetStorageUuidsBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(targetStorageUuids_);
}
}
/**
* repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto.Builder addTargetStorageUuidsBuilder() {
return getTargetStorageUuidsFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto.Builder addTargetStorageUuidsBuilder(
int index) {
return getTargetStorageUuidsFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto.Builder>
getTargetStorageUuidsBuilderList() {
return getTargetStorageUuidsFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProtoOrBuilder>
getTargetStorageUuidsFieldBuilder() {
if (targetStorageUuidsBuilder_ == null) {
targetStorageUuidsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProtoOrBuilder>(
targetStorageUuids_,
((bitField0_ & 0x00000010) == 0x00000010),
getParentForChildren(),
isClean());
targetStorageUuids_ = null;
}
return targetStorageUuidsBuilder_;
}
// repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto> targetStorageTypes_ =
java.util.Collections.emptyList();
private void ensureTargetStorageTypesIsMutable() {
if (!((bitField0_ & 0x00000020) == 0x00000020)) {
targetStorageTypes_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto>(targetStorageTypes_);
bitField0_ |= 0x00000020;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> targetStorageTypesBuilder_;
/**
* repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto> getTargetStorageTypesList() {
if (targetStorageTypesBuilder_ == null) {
return java.util.Collections.unmodifiableList(targetStorageTypes_);
} else {
return targetStorageTypesBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
*/
public int getTargetStorageTypesCount() {
if (targetStorageTypesBuilder_ == null) {
return targetStorageTypes_.size();
} else {
return targetStorageTypesBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getTargetStorageTypes(int index) {
if (targetStorageTypesBuilder_ == null) {
return targetStorageTypes_.get(index);
} else {
return targetStorageTypesBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
*/
public Builder setTargetStorageTypes(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) {
if (targetStorageTypesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetStorageTypesIsMutable();
targetStorageTypes_.set(index, value);
onChanged();
} else {
targetStorageTypesBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
*/
public Builder setTargetStorageTypes(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder builderForValue) {
if (targetStorageTypesBuilder_ == null) {
ensureTargetStorageTypesIsMutable();
targetStorageTypes_.set(index, builderForValue.build());
onChanged();
} else {
targetStorageTypesBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
*/
public Builder addTargetStorageTypes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) {
if (targetStorageTypesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetStorageTypesIsMutable();
targetStorageTypes_.add(value);
onChanged();
} else {
targetStorageTypesBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
*/
public Builder addTargetStorageTypes(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) {
if (targetStorageTypesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetStorageTypesIsMutable();
targetStorageTypes_.add(index, value);
onChanged();
} else {
targetStorageTypesBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
*/
public Builder addTargetStorageTypes(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder builderForValue) {
if (targetStorageTypesBuilder_ == null) {
ensureTargetStorageTypesIsMutable();
targetStorageTypes_.add(builderForValue.build());
onChanged();
} else {
targetStorageTypesBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
*/
public Builder addTargetStorageTypes(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder builderForValue) {
if (targetStorageTypesBuilder_ == null) {
ensureTargetStorageTypesIsMutable();
targetStorageTypes_.add(index, builderForValue.build());
onChanged();
} else {
targetStorageTypesBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
*/
public Builder addAllTargetStorageTypes(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto> values) {
if (targetStorageTypesBuilder_ == null) {
ensureTargetStorageTypesIsMutable();
super.addAll(values, targetStorageTypes_);
onChanged();
} else {
targetStorageTypesBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
*/
public Builder clearTargetStorageTypes() {
if (targetStorageTypesBuilder_ == null) {
targetStorageTypes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000020);
onChanged();
} else {
targetStorageTypesBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
*/
public Builder removeTargetStorageTypes(int index) {
if (targetStorageTypesBuilder_ == null) {
ensureTargetStorageTypesIsMutable();
targetStorageTypes_.remove(index);
onChanged();
} else {
targetStorageTypesBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder getTargetStorageTypesBuilder(
int index) {
return getTargetStorageTypesFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getTargetStorageTypesOrBuilder(
int index) {
if (targetStorageTypesBuilder_ == null) {
return targetStorageTypes_.get(index); } else {
return targetStorageTypesBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>
getTargetStorageTypesOrBuilderList() {
if (targetStorageTypesBuilder_ != null) {
return targetStorageTypesBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(targetStorageTypes_);
}
}
/**
* repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder addTargetStorageTypesBuilder() {
return getTargetStorageTypesFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder addTargetStorageTypesBuilder(
int index) {
return getTargetStorageTypesFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder>
getTargetStorageTypesBuilderList() {
return getTargetStorageTypesFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>
getTargetStorageTypesFieldBuilder() {
if (targetStorageTypesBuilder_ == null) {
targetStorageTypesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>(
targetStorageTypes_,
((bitField0_ & 0x00000020) == 0x00000020),
getParentForChildren(),
isClean());
targetStorageTypes_ = null;
}
return targetStorageTypesBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.BlockCommandProto)
}
static {
defaultInstance = new BlockCommandProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.BlockCommandProto)
}
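/*
 * The Builder above follows the standard protobuf 2.5 pattern for repeated message
 * fields (blocks, targets, targetStorageUuids, targetStorageTypes): plain
 * get/set/add/addAll/clear/remove accessors backed by an ArrayList until
 * getXxxFieldBuilder() lazily switches to a RepeatedFieldBuilder of nested builders.
 * A minimal, hypothetical usage sketch; the action, pool id, and field values below
 * are placeholders and assume the BlockCommandProto.Action enum and the HdfsProtos
 * message fields declared elsewhere in this package:
 *
 *   BlockCommandProto cmd = BlockCommandProto.newBuilder()
 *       .setAction(BlockCommandProto.Action.TRANSFER)
 *       .setBlockPoolId("BP-example-pool")
 *       .addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder()
 *           .setBlockId(1L)
 *           .setGenStamp(1L))
 *       .addTargets(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance())
 *       .build();
 */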
public interface BlockIdCommandProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.datanode.BlockIdCommandProto.Action action = 1;
/**
* required .hadoop.hdfs.datanode.BlockIdCommandProto.Action action = 1;
*/
boolean hasAction();
/**
* required .hadoop.hdfs.datanode.BlockIdCommandProto.Action action = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Action getAction();
// required string blockPoolId = 2;
/**
* required string blockPoolId = 2;
*/
boolean hasBlockPoolId();
/**
* required string blockPoolId = 2;
*/
java.lang.String getBlockPoolId();
/**
* required string blockPoolId = 2;
*/
com.google.protobuf.ByteString
getBlockPoolIdBytes();
// repeated uint64 blockIds = 3 [packed = true];
/**
* repeated uint64 blockIds = 3 [packed = true];
*/
java.util.List<java.lang.Long> getBlockIdsList();
/**
* repeated uint64 blockIds = 3 [packed = true];
*/
int getBlockIdsCount();
/**
* repeated uint64 blockIds = 3 [packed = true];
*/
long getBlockIds(int index);
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.BlockIdCommandProto}
*
*
**
* Command to instruct datanodes to perform certain action
* on the given set of block IDs.
*
*/
public static final class BlockIdCommandProto extends
com.google.protobuf.GeneratedMessage
implements BlockIdCommandProtoOrBuilder {
// Use BlockIdCommandProto.newBuilder() to construct.
private BlockIdCommandProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private BlockIdCommandProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final BlockIdCommandProto defaultInstance;
public static BlockIdCommandProto getDefaultInstance() {
return defaultInstance;
}
public BlockIdCommandProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private BlockIdCommandProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Action value = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Action.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
action_ = value;
}
break;
}
case 18: {
bitField0_ |= 0x00000002;
blockPoolId_ = input.readBytes();
break;
}
case 24: {
if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
blockIds_ = new java.util.ArrayList<java.lang.Long>();
mutable_bitField0_ |= 0x00000004;
}
blockIds_.add(input.readUInt64());
break;
}
case 26: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
if (!((mutable_bitField0_ & 0x00000004) == 0x00000004) && input.getBytesUntilLimit() > 0) {
blockIds_ = new java.util.ArrayList<java.lang.Long>();
mutable_bitField0_ |= 0x00000004;
}
while (input.getBytesUntilLimit() > 0) {
blockIds_.add(input.readUInt64());
}
input.popLimit(limit);
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
blockIds_ = java.util.Collections.unmodifiableList(blockIds_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
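/*
 * Wire-format note on the parsing loop above: blockIds is field 3, and a protobuf
 * tag is (field_number << 3) | wire_type, so the parser accepts both encodings of
 * the field, as required for packed repeated scalars:
 *   case 24 = (3 << 3) | 0  -> one unpacked varint value per tag
 *   case 26 = (3 << 3) | 2  -> a length-delimited run of packed varints
 */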
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockIdCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockIdCommandProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Builder.class);
}
public static com.google.protobuf.Parser<BlockIdCommandProto> PARSER =
new com.google.protobuf.AbstractParser<BlockIdCommandProto>() {
public BlockIdCommandProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new BlockIdCommandProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<BlockIdCommandProto> getParserForType() {
return PARSER;
}
/**
* Protobuf enum {@code hadoop.hdfs.datanode.BlockIdCommandProto.Action}
*/
public enum Action
implements com.google.protobuf.ProtocolMessageEnum {
/**
* CACHE = 1;
*/
CACHE(0, 1),
/**
* UNCACHE = 2;
*/
UNCACHE(1, 2),
;
/**
* CACHE = 1;
*/
public static final int CACHE_VALUE = 1;
/**
* UNCACHE = 2;
*/
public static final int UNCACHE_VALUE = 2;
public final int getNumber() { return value; }
public static Action valueOf(int value) {
switch (value) {
case 1: return CACHE;
case 2: return UNCACHE;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<Action>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<Action>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<Action>() {
public Action findValueByNumber(int number) {
return Action.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDescriptor().getEnumTypes().get(0);
}
private static final Action[] VALUES = values();
public static Action valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private Action(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.datanode.BlockIdCommandProto.Action)
}
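/*
 * In the Action enum above, the first constructor argument is the descriptor index
 * (0, 1) and the second is the proto wire number (CACHE = 1, UNCACHE = 2).
 * Action.valueOf(int) resolves wire numbers and returns null for anything it does
 * not recognize, which is why the parsing constructor falls back to
 * unknownFields.mergeVarintField for unrecognized enum values. For example:
 *
 *   Action.valueOf(2) == Action.UNCACHE;   // wire number lookup
 *   Action.CACHE.getNumber() == 1;         // back to the wire number
 *   Action.valueOf(99) == null;            // preserved as an unknown field
 */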
private int bitField0_;
// required .hadoop.hdfs.datanode.BlockIdCommandProto.Action action = 1;
public static final int ACTION_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Action action_;
/**
* required .hadoop.hdfs.datanode.BlockIdCommandProto.Action action = 1;
*/
public boolean hasAction() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.datanode.BlockIdCommandProto.Action action = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Action getAction() {
return action_;
}
// required string blockPoolId = 2;
public static final int BLOCKPOOLID_FIELD_NUMBER = 2;
private java.lang.Object blockPoolId_;
/**
* required string blockPoolId = 2;
*/
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string blockPoolId = 2;
*/
public java.lang.String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
blockPoolId_ = s;
}
return s;
}
}
/**
* required string blockPoolId = 2;
*/
public com.google.protobuf.ByteString
getBlockPoolIdBytes() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPoolId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// repeated uint64 blockIds = 3 [packed = true];
public static final int BLOCKIDS_FIELD_NUMBER = 3;
private java.util.List<java.lang.Long> blockIds_;
/**
* repeated uint64 blockIds = 3 [packed = true];
*/
public java.util.List<java.lang.Long>
getBlockIdsList() {
return blockIds_;
}
/**
* repeated uint64 blockIds = 3 [packed = true];
*/
public int getBlockIdsCount() {
return blockIds_.size();
}
/**
* repeated uint64 blockIds = 3 [packed = true];
*/
public long getBlockIds(int index) {
return blockIds_.get(index);
}
private int blockIdsMemoizedSerializedSize = -1;
private void initFields() {
action_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Action.CACHE;
blockPoolId_ = "";
blockIds_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasAction()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasBlockPoolId()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, action_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getBlockPoolIdBytes());
}
if (getBlockIdsList().size() > 0) {
output.writeRawVarint32(26);
output.writeRawVarint32(blockIdsMemoizedSerializedSize);
}
for (int i = 0; i < blockIds_.size(); i++) {
output.writeUInt64NoTag(blockIds_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, action_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getBlockPoolIdBytes());
}
{
int dataSize = 0;
for (int i = 0; i < blockIds_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeUInt64SizeNoTag(blockIds_.get(i));
}
size += dataSize;
if (!getBlockIdsList().isEmpty()) {
size += 1;
size += com.google.protobuf.CodedOutputStream
.computeInt32SizeNoTag(dataSize);
}
blockIdsMemoizedSerializedSize = dataSize;
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
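/*
 * Sizing note for the packed blockIds field above: dataSize is the sum of the
 * varint sizes of the individual ids; the field then costs one extra tag byte (26)
 * plus a varint length prefix. For example, blockIds [1, 300] take 1 + 2 = 3 data
 * bytes, plus 1 tag byte and 1 length byte, 5 bytes in total. The computed dataSize
 * is memoized in blockIdsMemoizedSerializedSize so that writeTo() can emit the
 * length prefix without recomputing it, which is why writeTo() calls
 * getSerializedSize() first.
 */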
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto) obj;
boolean result = true;
result = result && (hasAction() == other.hasAction());
if (hasAction()) {
result = result &&
(getAction() == other.getAction());
}
result = result && (hasBlockPoolId() == other.hasBlockPoolId());
if (hasBlockPoolId()) {
result = result && getBlockPoolId()
.equals(other.getBlockPoolId());
}
result = result && getBlockIdsList()
.equals(other.getBlockIdsList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasAction()) {
hash = (37 * hash) + ACTION_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getAction());
}
if (hasBlockPoolId()) {
hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
hash = (53 * hash) + getBlockPoolId().hashCode();
}
if (getBlockIdsCount() > 0) {
hash = (37 * hash) + BLOCKIDS_FIELD_NUMBER;
hash = (53 * hash) + getBlockIdsList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.BlockIdCommandProto}
*
*
**
* Command to instruct datanodes to perform certain action
* on the given set of block IDs.
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockIdCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockIdCommandProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
action_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Action.CACHE;
bitField0_ = (bitField0_ & ~0x00000001);
blockPoolId_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
blockIds_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockIdCommandProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.action_ = action_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.blockPoolId_ = blockPoolId_;
if (((bitField0_ & 0x00000004) == 0x00000004)) {
blockIds_ = java.util.Collections.unmodifiableList(blockIds_);
bitField0_ = (bitField0_ & ~0x00000004);
}
result.blockIds_ = blockIds_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance()) return this;
if (other.hasAction()) {
setAction(other.getAction());
}
if (other.hasBlockPoolId()) {
bitField0_ |= 0x00000002;
blockPoolId_ = other.blockPoolId_;
onChanged();
}
if (!other.blockIds_.isEmpty()) {
if (blockIds_.isEmpty()) {
blockIds_ = other.blockIds_;
bitField0_ = (bitField0_ & ~0x00000004);
} else {
ensureBlockIdsIsMutable();
blockIds_.addAll(other.blockIds_);
}
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasAction()) {
return false;
}
if (!hasBlockPoolId()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.datanode.BlockIdCommandProto.Action action = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Action action_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Action.CACHE;
/**
* required .hadoop.hdfs.datanode.BlockIdCommandProto.Action action = 1;
*/
public boolean hasAction() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.datanode.BlockIdCommandProto.Action action = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Action getAction() {
return action_;
}
/**
* required .hadoop.hdfs.datanode.BlockIdCommandProto.Action action = 1;
*/
public Builder setAction(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Action value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
action_ = value;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.datanode.BlockIdCommandProto.Action action = 1;
*/
public Builder clearAction() {
bitField0_ = (bitField0_ & ~0x00000001);
action_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Action.CACHE;
onChanged();
return this;
}
// required string blockPoolId = 2;
private java.lang.Object blockPoolId_ = "";
/**
* required string blockPoolId = 2;
*/
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string blockPoolId = 2;
*/
public java.lang.String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
blockPoolId_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string blockPoolId = 2;
*/
public com.google.protobuf.ByteString
getBlockPoolIdBytes() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPoolId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string blockPoolId = 2;
*/
public Builder setBlockPoolId(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
blockPoolId_ = value;
onChanged();
return this;
}
/**
* required string blockPoolId = 2;
*/
public Builder clearBlockPoolId() {
bitField0_ = (bitField0_ & ~0x00000002);
blockPoolId_ = getDefaultInstance().getBlockPoolId();
onChanged();
return this;
}
/**
* required string blockPoolId = 2;
*/
public Builder setBlockPoolIdBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
blockPoolId_ = value;
onChanged();
return this;
}
// repeated uint64 blockIds = 3 [packed = true];
private java.util.List<java.lang.Long> blockIds_ = java.util.Collections.emptyList();
private void ensureBlockIdsIsMutable() {
if (!((bitField0_ & 0x00000004) == 0x00000004)) {
blockIds_ = new java.util.ArrayList<java.lang.Long>(blockIds_);
bitField0_ |= 0x00000004;
}
}
/**
* repeated uint64 blockIds = 3 [packed = true];
*/
public java.util.List<java.lang.Long>
getBlockIdsList() {
return java.util.Collections.unmodifiableList(blockIds_);
}
/**
* repeated uint64 blockIds = 3 [packed = true];
*/
public int getBlockIdsCount() {
return blockIds_.size();
}
/**
* repeated uint64 blockIds = 3 [packed = true];
*/
public long getBlockIds(int index) {
return blockIds_.get(index);
}
/**
* repeated uint64 blockIds = 3 [packed = true];
*/
public Builder setBlockIds(
int index, long value) {
ensureBlockIdsIsMutable();
blockIds_.set(index, value);
onChanged();
return this;
}
/**
* repeated uint64 blockIds = 3 [packed = true];
*/
public Builder addBlockIds(long value) {
ensureBlockIdsIsMutable();
blockIds_.add(value);
onChanged();
return this;
}
/**
* repeated uint64 blockIds = 3 [packed = true];
*/
public Builder addAllBlockIds(
java.lang.Iterable<? extends java.lang.Long> values) {
ensureBlockIdsIsMutable();
super.addAll(values, blockIds_);
onChanged();
return this;
}
/**
* repeated uint64 blockIds = 3 [packed = true];
*/
public Builder clearBlockIds() {
blockIds_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.BlockIdCommandProto)
}
static {
defaultInstance = new BlockIdCommandProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.BlockIdCommandProto)
}
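/*
 * Illustrative round trip for BlockIdCommandProto; the pool id and block ids below
 * are placeholder values, not taken from this file:
 *
 *   BlockIdCommandProto cmd = BlockIdCommandProto.newBuilder()
 *       .setAction(BlockIdCommandProto.Action.CACHE)
 *       .setBlockPoolId("BP-example-pool")
 *       .addBlockIds(1001L)
 *       .addBlockIds(1002L)
 *       .build();               // build() throws if action or blockPoolId is unset
 *   byte[] bytes = cmd.toByteArray();
 *   BlockIdCommandProto parsed = BlockIdCommandProto.parseFrom(bytes);
 */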
public interface BlockRecoveryCommandProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
/**
* repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto>
getBlocksList();
/**
* repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto getBlocks(int index);
/**
* repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
*/
int getBlocksCount();
/**
* repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProtoOrBuilder>
getBlocksOrBuilderList();
/**
* repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProtoOrBuilder getBlocksOrBuilder(
int index);
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.BlockRecoveryCommandProto}
*
*
**
* List of blocks to be recovered by the datanode
*
*/
public static final class BlockRecoveryCommandProto extends
com.google.protobuf.GeneratedMessage
implements BlockRecoveryCommandProtoOrBuilder {
// Use BlockRecoveryCommandProto.newBuilder() to construct.
private BlockRecoveryCommandProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private BlockRecoveryCommandProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final BlockRecoveryCommandProto defaultInstance;
public static BlockRecoveryCommandProto getDefaultInstance() {
return defaultInstance;
}
public BlockRecoveryCommandProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private BlockRecoveryCommandProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto>();
mutable_bitField0_ |= 0x00000001;
}
blocks_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.PARSER, extensionRegistry));
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
blocks_ = java.util.Collections.unmodifiableList(blocks_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockRecoveryCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockRecoveryCommandProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder.class);
}
public static com.google.protobuf.Parser<BlockRecoveryCommandProto> PARSER =
new com.google.protobuf.AbstractParser<BlockRecoveryCommandProto>() {
public BlockRecoveryCommandProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new BlockRecoveryCommandProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<BlockRecoveryCommandProto> getParserForType() {
return PARSER;
}
// repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
public static final int BLOCKS_FIELD_NUMBER = 1;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto> blocks_;
/**
* repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto> getBlocksList() {
return blocks_;
}
/**
* repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProtoOrBuilder>
getBlocksOrBuilderList() {
return blocks_;
}
/**
* repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
*/
public int getBlocksCount() {
return blocks_.size();
}
/**
* repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto getBlocks(int index) {
return blocks_.get(index);
}
/**
* repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProtoOrBuilder getBlocksOrBuilder(
int index) {
return blocks_.get(index);
}
private void initFields() {
blocks_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
for (int i = 0; i < getBlocksCount(); i++) {
if (!getBlocks(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < blocks_.size(); i++) {
output.writeMessage(1, blocks_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
for (int i = 0; i < blocks_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, blocks_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto) obj;
boolean result = true;
result = result && getBlocksList()
.equals(other.getBlocksList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (getBlocksCount() > 0) {
hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
hash = (53 * hash) + getBlocksList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.BlockRecoveryCommandProto}
*
*
**
* List of blocks to be recovered by the datanode
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockRecoveryCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockRecoveryCommandProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getBlocksFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (blocksBuilder_ == null) {
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
} else {
blocksBuilder_.clear();
}
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockRecoveryCommandProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto(this);
int from_bitField0_ = bitField0_;
if (blocksBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001)) {
blocks_ = java.util.Collections.unmodifiableList(blocks_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.blocks_ = blocks_;
} else {
result.blocks_ = blocksBuilder_.build();
}
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance()) return this;
if (blocksBuilder_ == null) {
if (!other.blocks_.isEmpty()) {
if (blocks_.isEmpty()) {
blocks_ = other.blocks_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureBlocksIsMutable();
blocks_.addAll(other.blocks_);
}
onChanged();
}
} else {
if (!other.blocks_.isEmpty()) {
if (blocksBuilder_.isEmpty()) {
blocksBuilder_.dispose();
blocksBuilder_ = null;
blocks_ = other.blocks_;
bitField0_ = (bitField0_ & ~0x00000001);
blocksBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getBlocksFieldBuilder() : null;
} else {
blocksBuilder_.addAllMessages(other.blocks_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
for (int i = 0; i < getBlocksCount(); i++) {
if (!getBlocks(i).isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto> blocks_ =
java.util.Collections.emptyList();
private void ensureBlocksIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto>(blocks_);
bitField0_ |= 0x00000001;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProtoOrBuilder> blocksBuilder_;
/**
* repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto> getBlocksList() {
if (blocksBuilder_ == null) {
return java.util.Collections.unmodifiableList(blocks_);
} else {
return blocksBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
*/
public int getBlocksCount() {
if (blocksBuilder_ == null) {
return blocks_.size();
} else {
return blocksBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto getBlocks(int index) {
if (blocksBuilder_ == null) {
return blocks_.get(index);
} else {
return blocksBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
*/
public Builder setBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.set(index, value);
onChanged();
} else {
blocksBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
*/
public Builder setBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.set(index, builderForValue.build());
onChanged();
} else {
blocksBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
*/
public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.add(value);
onChanged();
} else {
blocksBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
*/
public Builder addBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.add(index, value);
onChanged();
} else {
blocksBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
*/
public Builder addBlocks(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.add(builderForValue.build());
onChanged();
} else {
blocksBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
*/
public Builder addBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.add(index, builderForValue.build());
onChanged();
} else {
blocksBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
*/
public Builder addAllBlocks(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto> values) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
super.addAll(values, blocks_);
onChanged();
} else {
blocksBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
*/
public Builder clearBlocks() {
if (blocksBuilder_ == null) {
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
} else {
blocksBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
*/
public Builder removeBlocks(int index) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.remove(index);
onChanged();
} else {
blocksBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.Builder getBlocksBuilder(
int index) {
return getBlocksFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProtoOrBuilder getBlocksOrBuilder(
int index) {
if (blocksBuilder_ == null) {
return blocks_.get(index); } else {
return blocksBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProtoOrBuilder>
getBlocksOrBuilderList() {
if (blocksBuilder_ != null) {
return blocksBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(blocks_);
}
}
/**
* repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.Builder addBlocksBuilder() {
return getBlocksFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.Builder addBlocksBuilder(
int index) {
return getBlocksFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.Builder>
getBlocksBuilderList() {
return getBlocksFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProtoOrBuilder>
getBlocksFieldBuilder() {
if (blocksBuilder_ == null) {
blocksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProtoOrBuilder>(
blocks_,
((bitField0_ & 0x00000001) == 0x00000001),
getParentForChildren(),
isClean());
blocks_ = null;
}
return blocksBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.BlockRecoveryCommandProto)
}
static {
defaultInstance = new BlockRecoveryCommandProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.BlockRecoveryCommandProto)
}
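// --- Illustrative usage sketch: not part of the protoc output above, added only to show
// --- how the generated builder/parse API of BlockRecoveryCommandProto can be exercised.
// --- The RecoveringBlockProto argument is assumed to be obtained elsewhere (HdfsServerProtos).
private static BlockRecoveryCommandProto exampleBlockRecoveryCommand(
    org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto block)
    throws com.google.protobuf.InvalidProtocolBufferException {
  // 'blocks' is a repeated field, so each addBlocks() call appends one entry.
  BlockRecoveryCommandProto cmd = BlockRecoveryCommandProto.newBuilder()
      .addBlocks(block)
      .build();
  // Round-trip through the wire format using the generated parse methods.
  byte[] wire = cmd.toByteArray();
  return BlockRecoveryCommandProto.parseFrom(wire);
}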
public interface FinalizeCommandProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required string blockPoolId = 1;
/**
* required string blockPoolId = 1;
*
*
* Block pool to be finalized
*
*/
boolean hasBlockPoolId();
/**
* required string blockPoolId = 1;
*
*
* Block pool to be finalized
*
*/
java.lang.String getBlockPoolId();
/**
* required string blockPoolId = 1;
*
*
* Block pool to be finalized
*
*/
com.google.protobuf.ByteString
getBlockPoolIdBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.FinalizeCommandProto}
*
*
**
* Finalize the upgrade at the datanode
*
*/
public static final class FinalizeCommandProto extends
com.google.protobuf.GeneratedMessage
implements FinalizeCommandProtoOrBuilder {
// Use FinalizeCommandProto.newBuilder() to construct.
private FinalizeCommandProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private FinalizeCommandProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final FinalizeCommandProto defaultInstance;
public static FinalizeCommandProto getDefaultInstance() {
return defaultInstance;
}
public FinalizeCommandProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private FinalizeCommandProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
blockPoolId_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_FinalizeCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_FinalizeCommandProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder.class);
}
public static com.google.protobuf.Parser<FinalizeCommandProto> PARSER =
new com.google.protobuf.AbstractParser<FinalizeCommandProto>() {
public FinalizeCommandProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new FinalizeCommandProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<FinalizeCommandProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required string blockPoolId = 1;
public static final int BLOCKPOOLID_FIELD_NUMBER = 1;
private java.lang.Object blockPoolId_;
/**
* required string blockPoolId = 1;
*
*
* Block pool to be finalized
*
*/
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string blockPoolId = 1;
*
*
* Block pool to be finalized
*
*/
public java.lang.String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
blockPoolId_ = s;
}
return s;
}
}
/**
* required string blockPoolId = 1;
*
*
* Block pool to be finalized
*
*/
public com.google.protobuf.ByteString
getBlockPoolIdBytes() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPoolId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
blockPoolId_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasBlockPoolId()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getBlockPoolIdBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getBlockPoolIdBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto) obj;
boolean result = true;
result = result && (hasBlockPoolId() == other.hasBlockPoolId());
if (hasBlockPoolId()) {
result = result && getBlockPoolId()
.equals(other.getBlockPoolId());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasBlockPoolId()) {
hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
hash = (53 * hash) + getBlockPoolId().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.FinalizeCommandProto}
*
*
**
* Finalize the upgrade at the datanode
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_FinalizeCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_FinalizeCommandProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
blockPoolId_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_FinalizeCommandProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.blockPoolId_ = blockPoolId_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance()) return this;
if (other.hasBlockPoolId()) {
bitField0_ |= 0x00000001;
blockPoolId_ = other.blockPoolId_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasBlockPoolId()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string blockPoolId = 1;
private java.lang.Object blockPoolId_ = "";
/**
* required string blockPoolId = 1;
*
*
* Block pool to be finalized
*
*/
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string blockPoolId = 1;
*
*
* Block pool to be finalized
*
*/
public java.lang.String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
blockPoolId_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string blockPoolId = 1;
*
*
* Block pool to be finalized
*
*/
public com.google.protobuf.ByteString
getBlockPoolIdBytes() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPoolId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string blockPoolId = 1;
*
*
* Block pool to be finalized
*
*/
public Builder setBlockPoolId(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
blockPoolId_ = value;
onChanged();
return this;
}
/**
* required string blockPoolId = 1;
*
*
* Block pool to be finalized
*
*/
public Builder clearBlockPoolId() {
bitField0_ = (bitField0_ & ~0x00000001);
blockPoolId_ = getDefaultInstance().getBlockPoolId();
onChanged();
return this;
}
/**
* required string blockPoolId = 1;
*
*
* Block pool to be finalized
*
*/
public Builder setBlockPoolIdBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
blockPoolId_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.FinalizeCommandProto)
}
static {
defaultInstance = new FinalizeCommandProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.FinalizeCommandProto)
}
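// --- Illustrative usage sketch: not part of the protoc output above, added only to show
// --- the FinalizeCommandProto builder API. blockPoolId is the single required field;
// --- build() throws an uninitialized-message exception if it was never set.
private static FinalizeCommandProto exampleFinalizeCommand(java.lang.String blockPoolId) {
  return FinalizeCommandProto.newBuilder()
      .setBlockPoolId(blockPoolId) // required string; e.g. a pool id reported by the namenode
      .build();
}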
public interface KeyUpdateCommandProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;
*/
boolean hasKeys();
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto getKeys();
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.KeyUpdateCommandProto}
*
*
**
* Update the block keys at the datanode
*
*/
public static final class KeyUpdateCommandProto extends
com.google.protobuf.GeneratedMessage
implements KeyUpdateCommandProtoOrBuilder {
// Use KeyUpdateCommandProto.newBuilder() to construct.
private KeyUpdateCommandProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private KeyUpdateCommandProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final KeyUpdateCommandProto defaultInstance;
public static KeyUpdateCommandProto getDefaultInstance() {
return defaultInstance;
}
public KeyUpdateCommandProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private KeyUpdateCommandProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = keys_.toBuilder();
}
keys_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(keys_);
keys_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_KeyUpdateCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_KeyUpdateCommandProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder.class);
}
public static com.google.protobuf.Parser<KeyUpdateCommandProto> PARSER =
new com.google.protobuf.AbstractParser<KeyUpdateCommandProto>() {
public KeyUpdateCommandProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new KeyUpdateCommandProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<KeyUpdateCommandProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;
public static final int KEYS_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto keys_;
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;
*/
public boolean hasKeys() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto getKeys() {
return keys_;
}
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder() {
return keys_;
}
private void initFields() {
keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasKeys()) {
memoizedIsInitialized = 0;
return false;
}
if (!getKeys().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, keys_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, keys_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto) obj;
boolean result = true;
result = result && (hasKeys() == other.hasKeys());
if (hasKeys()) {
result = result && getKeys()
.equals(other.getKeys());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasKeys()) {
hash = (37 * hash) + KEYS_FIELD_NUMBER;
hash = (53 * hash) + getKeys().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.KeyUpdateCommandProto}
*
*
**
* Update the block keys at the datanode
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_KeyUpdateCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_KeyUpdateCommandProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getKeysFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (keysBuilder_ == null) {
keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.getDefaultInstance();
} else {
keysBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_KeyUpdateCommandProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (keysBuilder_ == null) {
result.keys_ = keys_;
} else {
result.keys_ = keysBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance()) return this;
if (other.hasKeys()) {
mergeKeys(other.getKeys());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasKeys()) {
return false;
}
if (!getKeys().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder> keysBuilder_;
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;
*/
public boolean hasKeys() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto getKeys() {
if (keysBuilder_ == null) {
return keys_;
} else {
return keysBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;
*/
public Builder setKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto value) {
if (keysBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
keys_ = value;
onChanged();
} else {
keysBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;
*/
public Builder setKeys(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder builderForValue) {
if (keysBuilder_ == null) {
keys_ = builderForValue.build();
onChanged();
} else {
keysBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;
*/
public Builder mergeKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto value) {
if (keysBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
keys_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.getDefaultInstance()) {
keys_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.newBuilder(keys_).mergeFrom(value).buildPartial();
} else {
keys_ = value;
}
onChanged();
} else {
keysBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;
*/
public Builder clearKeys() {
if (keysBuilder_ == null) {
keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.getDefaultInstance();
onChanged();
} else {
keysBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder getKeysBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getKeysFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder() {
if (keysBuilder_ != null) {
return keysBuilder_.getMessageOrBuilder();
} else {
return keys_;
}
}
/**
* required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder>
getKeysFieldBuilder() {
if (keysBuilder_ == null) {
keysBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder>(
keys_,
getParentForChildren(),
isClean());
keys_ = null;
}
return keysBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.KeyUpdateCommandProto)
}
static {
defaultInstance = new KeyUpdateCommandProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.KeyUpdateCommandProto)
}
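// --- Illustrative usage sketch: not part of the protoc output above, added only to show
// --- the KeyUpdateCommandProto builder API. The required ExportedBlockKeysProto value is
// --- assumed to be built elsewhere (see HdfsServerProtos); isInitialized() also checks it.
private static KeyUpdateCommandProto exampleKeyUpdateCommand(
    org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto keys) {
  return KeyUpdateCommandProto.newBuilder()
      .setKeys(keys) // required sub-message carrying the new block keys
      .build();
}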
public interface RegisterCommandProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.RegisterCommandProto}
*
*
**
* Instruct datanode to register with the namenode
*
*/
public static final class RegisterCommandProto extends
com.google.protobuf.GeneratedMessage
implements RegisterCommandProtoOrBuilder {
// Use RegisterCommandProto.newBuilder() to construct.
private RegisterCommandProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private RegisterCommandProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final RegisterCommandProto defaultInstance;
public static RegisterCommandProto getDefaultInstance() {
return defaultInstance;
}
public RegisterCommandProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private RegisterCommandProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterCommandProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder.class);
}
public static com.google.protobuf.Parser<RegisterCommandProto> PARSER =
new com.google.protobuf.AbstractParser<RegisterCommandProto>() {
public RegisterCommandProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new RegisterCommandProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<RegisterCommandProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.RegisterCommandProto}
*
* <pre>
**
* Instruct datanode to register with the namenode
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterCommandProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterCommandProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.RegisterCommandProto)
}
static {
defaultInstance = new RegisterCommandProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.RegisterCommandProto)
}
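// RegisterCommandProto carries no fields of its own; a minimal sketch of how an
// instance is typically obtained or rebuilt from the wire:
//
//   RegisterCommandProto cmd = RegisterCommandProto.getDefaultInstance();
//   RegisterCommandProto roundTripped = RegisterCommandProto.parseFrom(cmd.toByteArray());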
public interface RegisterDatanodeRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
boolean hasRegistration();
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration();
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.RegisterDatanodeRequestProto}
*
* <pre>
**
* registration - Information of the datanode registering with the namenode
* </pre>
*/
public static final class RegisterDatanodeRequestProto extends
com.google.protobuf.GeneratedMessage
implements RegisterDatanodeRequestProtoOrBuilder {
// Use RegisterDatanodeRequestProto.newBuilder() to construct.
private RegisterDatanodeRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private RegisterDatanodeRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final RegisterDatanodeRequestProto defaultInstance;
public static RegisterDatanodeRequestProto getDefaultInstance() {
return defaultInstance;
}
public RegisterDatanodeRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private RegisterDatanodeRequestProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = registration_.toBuilder();
}
registration_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(registration_);
registration_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterDatanodeRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterDatanodeRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.Builder.class);
}
public static com.google.protobuf.Parser<RegisterDatanodeRequestProto> PARSER =
new com.google.protobuf.AbstractParser<RegisterDatanodeRequestProto>() {
public RegisterDatanodeRequestProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new RegisterDatanodeRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<RegisterDatanodeRequestProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
public static final int REGISTRATION_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_;
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public boolean hasRegistration() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
return registration_;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
return registration_;
}
private void initFields() {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasRegistration()) {
memoizedIsInitialized = 0;
return false;
}
if (!getRegistration().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, registration_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, registration_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto) obj;
boolean result = true;
result = result && (hasRegistration() == other.hasRegistration());
if (hasRegistration()) {
result = result && getRegistration()
.equals(other.getRegistration());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasRegistration()) {
hash = (37 * hash) + REGISTRATION_FIELD_NUMBER;
hash = (53 * hash) + getRegistration().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.RegisterDatanodeRequestProto}
*
* <pre>
**
* registration - Information of the datanode registering with the namenode
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterDatanodeRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterDatanodeRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getRegistrationFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (registrationBuilder_ == null) {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
} else {
registrationBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterDatanodeRequestProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (registrationBuilder_ == null) {
result.registration_ = registration_;
} else {
result.registration_ = registrationBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.getDefaultInstance()) return this;
if (other.hasRegistration()) {
mergeRegistration(other.getRegistration());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasRegistration()) {
return false;
}
if (!getRegistration().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registrationBuilder_;
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public boolean hasRegistration() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
if (registrationBuilder_ == null) {
return registration_;
} else {
return registrationBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
if (registrationBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
registration_ = value;
onChanged();
} else {
registrationBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public Builder setRegistration(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) {
if (registrationBuilder_ == null) {
registration_ = builderForValue.build();
onChanged();
} else {
registrationBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
if (registrationBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
registration_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) {
registration_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial();
} else {
registration_ = value;
}
onChanged();
} else {
registrationBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public Builder clearRegistration() {
if (registrationBuilder_ == null) {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
onChanged();
} else {
registrationBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistrationBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getRegistrationFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
if (registrationBuilder_ != null) {
return registrationBuilder_.getMessageOrBuilder();
} else {
return registration_;
}
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>
getRegistrationFieldBuilder() {
if (registrationBuilder_ == null) {
registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>(
registration_,
getParentForChildren(),
isClean());
registration_ = null;
}
return registrationBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.RegisterDatanodeRequestProto)
}
static {
defaultInstance = new RegisterDatanodeRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.RegisterDatanodeRequestProto)
}
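// A minimal usage sketch for RegisterDatanodeRequestProto, assuming a fully populated
// DatanodeRegistrationProto named "registration" (an illustrative variable whose
// required datanodeID, storageInfo, keys and softwareVersion fields are already set):
//
//   RegisterDatanodeRequestProto request = RegisterDatanodeRequestProto.newBuilder()
//       .setRegistration(registration)
//       .build();
//   byte[] wire = request.toByteArray();
//   RegisterDatanodeRequestProto parsed = RegisterDatanodeRequestProto.parseFrom(wire);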
public interface RegisterDatanodeResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
boolean hasRegistration();
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration();
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.RegisterDatanodeResponseProto}
*
* <pre>
**
* registration - Update registration of the datanode that successfully
* registered. StorageInfo will be updated to include new
* storage ID if the datanode did not have one in the request.
* </pre>
*/
public static final class RegisterDatanodeResponseProto extends
com.google.protobuf.GeneratedMessage
implements RegisterDatanodeResponseProtoOrBuilder {
// Use RegisterDatanodeResponseProto.newBuilder() to construct.
private RegisterDatanodeResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private RegisterDatanodeResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final RegisterDatanodeResponseProto defaultInstance;
public static RegisterDatanodeResponseProto getDefaultInstance() {
return defaultInstance;
}
public RegisterDatanodeResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private RegisterDatanodeResponseProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = registration_.toBuilder();
}
registration_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(registration_);
registration_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterDatanodeResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterDatanodeResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.Builder.class);
}
public static com.google.protobuf.Parser<RegisterDatanodeResponseProto> PARSER =
new com.google.protobuf.AbstractParser<RegisterDatanodeResponseProto>() {
public RegisterDatanodeResponseProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new RegisterDatanodeResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<RegisterDatanodeResponseProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
public static final int REGISTRATION_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_;
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public boolean hasRegistration() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
return registration_;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
return registration_;
}
private void initFields() {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasRegistration()) {
memoizedIsInitialized = 0;
return false;
}
if (!getRegistration().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, registration_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, registration_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto) obj;
boolean result = true;
result = result && (hasRegistration() == other.hasRegistration());
if (hasRegistration()) {
result = result && getRegistration()
.equals(other.getRegistration());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasRegistration()) {
hash = (37 * hash) + REGISTRATION_FIELD_NUMBER;
hash = (53 * hash) + getRegistration().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.RegisterDatanodeResponseProto}
*
* <pre>
**
* registration - Update registration of the datanode that successfully
* registered. StorageInfo will be updated to include new
* storage ID if the datanode did not have one in the request.
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterDatanodeResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterDatanodeResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getRegistrationFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (registrationBuilder_ == null) {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
} else {
registrationBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterDatanodeResponseProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (registrationBuilder_ == null) {
result.registration_ = registration_;
} else {
result.registration_ = registrationBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance()) return this;
if (other.hasRegistration()) {
mergeRegistration(other.getRegistration());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasRegistration()) {
return false;
}
if (!getRegistration().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registrationBuilder_;
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public boolean hasRegistration() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
if (registrationBuilder_ == null) {
return registration_;
} else {
return registrationBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
if (registrationBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
registration_ = value;
onChanged();
} else {
registrationBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public Builder setRegistration(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) {
if (registrationBuilder_ == null) {
registration_ = builderForValue.build();
onChanged();
} else {
registrationBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
if (registrationBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
registration_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) {
registration_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial();
} else {
registration_ = value;
}
onChanged();
} else {
registrationBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public Builder clearRegistration() {
if (registrationBuilder_ == null) {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
onChanged();
} else {
registrationBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistrationBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getRegistrationFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
if (registrationBuilder_ != null) {
return registrationBuilder_.getMessageOrBuilder();
} else {
return registration_;
}
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>
getRegistrationFieldBuilder() {
if (registrationBuilder_ == null) {
registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>(
registration_,
getParentForChildren(),
isClean());
registration_ = null;
}
return registrationBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.RegisterDatanodeResponseProto)
}
static {
defaultInstance = new RegisterDatanodeResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.RegisterDatanodeResponseProto)
}
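// A minimal usage sketch for RegisterDatanodeResponseProto, assuming "request" was
// built as in the sketch above; the namenode side would typically echo back a
// (possibly updated) registration:
//
//   RegisterDatanodeResponseProto response = RegisterDatanodeResponseProto.newBuilder()
//       .setRegistration(request.getRegistration())
//       .build();
//   DatanodeRegistrationProto updated = response.getRegistration();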
public interface VolumeFailureSummaryProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// repeated string failedStorageLocations = 1;
/**
* repeated string failedStorageLocations = 1;
*/
java.util.List<java.lang.String>
getFailedStorageLocationsList();
/**
* repeated string failedStorageLocations = 1;
*/
int getFailedStorageLocationsCount();
/**
* repeated string failedStorageLocations = 1;
*/
java.lang.String getFailedStorageLocations(int index);
/**
* repeated string failedStorageLocations = 1;
*/
com.google.protobuf.ByteString
getFailedStorageLocationsBytes(int index);
// required uint64 lastVolumeFailureDate = 2;
/**
* required uint64 lastVolumeFailureDate = 2;
*/
boolean hasLastVolumeFailureDate();
/**
* required uint64 lastVolumeFailureDate = 2;
*/
long getLastVolumeFailureDate();
// required uint64 estimatedCapacityLostTotal = 3;
/**
* required uint64 estimatedCapacityLostTotal = 3;
*/
boolean hasEstimatedCapacityLostTotal();
/**
* required uint64 estimatedCapacityLostTotal = 3;
*/
long getEstimatedCapacityLostTotal();
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.VolumeFailureSummaryProto}
*
* <pre>
**
* failedStorageLocations - storage locations that have failed
* lastVolumeFailureDate - date/time of last volume failure
* estimatedCapacityLost - estimate of total capacity lost due to volume failures
* </pre>
*/
public static final class VolumeFailureSummaryProto extends
com.google.protobuf.GeneratedMessage
implements VolumeFailureSummaryProtoOrBuilder {
// Use VolumeFailureSummaryProto.newBuilder() to construct.
private VolumeFailureSummaryProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private VolumeFailureSummaryProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final VolumeFailureSummaryProto defaultInstance;
public static VolumeFailureSummaryProto getDefaultInstance() {
return defaultInstance;
}
public VolumeFailureSummaryProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private VolumeFailureSummaryProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
failedStorageLocations_ = new com.google.protobuf.LazyStringArrayList();
mutable_bitField0_ |= 0x00000001;
}
failedStorageLocations_.add(input.readBytes());
break;
}
case 16: {
bitField0_ |= 0x00000001;
lastVolumeFailureDate_ = input.readUInt64();
break;
}
case 24: {
bitField0_ |= 0x00000002;
estimatedCapacityLostTotal_ = input.readUInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
failedStorageLocations_ = new com.google.protobuf.UnmodifiableLazyStringList(failedStorageLocations_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_VolumeFailureSummaryProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_VolumeFailureSummaryProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.Builder.class);
}
public static com.google.protobuf.Parser<VolumeFailureSummaryProto> PARSER =
new com.google.protobuf.AbstractParser<VolumeFailureSummaryProto>() {
public VolumeFailureSummaryProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new VolumeFailureSummaryProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<VolumeFailureSummaryProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// repeated string failedStorageLocations = 1;
public static final int FAILEDSTORAGELOCATIONS_FIELD_NUMBER = 1;
private com.google.protobuf.LazyStringList failedStorageLocations_;
/**
* repeated string failedStorageLocations = 1;
*/
public java.util.List<java.lang.String>
getFailedStorageLocationsList() {
return failedStorageLocations_;
}
/**
* repeated string failedStorageLocations = 1;
*/
public int getFailedStorageLocationsCount() {
return failedStorageLocations_.size();
}
/**
* repeated string failedStorageLocations = 1;
*/
public java.lang.String getFailedStorageLocations(int index) {
return failedStorageLocations_.get(index);
}
/**
* repeated string failedStorageLocations = 1;
*/
public com.google.protobuf.ByteString
getFailedStorageLocationsBytes(int index) {
return failedStorageLocations_.getByteString(index);
}
// required uint64 lastVolumeFailureDate = 2;
public static final int LASTVOLUMEFAILUREDATE_FIELD_NUMBER = 2;
private long lastVolumeFailureDate_;
/**
* required uint64 lastVolumeFailureDate = 2;
*/
public boolean hasLastVolumeFailureDate() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint64 lastVolumeFailureDate = 2;
*/
public long getLastVolumeFailureDate() {
return lastVolumeFailureDate_;
}
// required uint64 estimatedCapacityLostTotal = 3;
public static final int ESTIMATEDCAPACITYLOSTTOTAL_FIELD_NUMBER = 3;
private long estimatedCapacityLostTotal_;
/**
* required uint64 estimatedCapacityLostTotal = 3;
*/
public boolean hasEstimatedCapacityLostTotal() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 estimatedCapacityLostTotal = 3;
*/
public long getEstimatedCapacityLostTotal() {
return estimatedCapacityLostTotal_;
}
private void initFields() {
failedStorageLocations_ = com.google.protobuf.LazyStringArrayList.EMPTY;
lastVolumeFailureDate_ = 0L;
estimatedCapacityLostTotal_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasLastVolumeFailureDate()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasEstimatedCapacityLostTotal()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < failedStorageLocations_.size(); i++) {
output.writeBytes(1, failedStorageLocations_.getByteString(i));
}
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt64(2, lastVolumeFailureDate_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(3, estimatedCapacityLostTotal_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
{
int dataSize = 0;
for (int i = 0; i < failedStorageLocations_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeBytesSizeNoTag(failedStorageLocations_.getByteString(i));
}
size += dataSize;
size += 1 * getFailedStorageLocationsList().size();
}
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, lastVolumeFailureDate_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, estimatedCapacityLostTotal_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto) obj;
boolean result = true;
result = result && getFailedStorageLocationsList()
.equals(other.getFailedStorageLocationsList());
result = result && (hasLastVolumeFailureDate() == other.hasLastVolumeFailureDate());
if (hasLastVolumeFailureDate()) {
result = result && (getLastVolumeFailureDate()
== other.getLastVolumeFailureDate());
}
result = result && (hasEstimatedCapacityLostTotal() == other.hasEstimatedCapacityLostTotal());
if (hasEstimatedCapacityLostTotal()) {
result = result && (getEstimatedCapacityLostTotal()
== other.getEstimatedCapacityLostTotal());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (getFailedStorageLocationsCount() > 0) {
hash = (37 * hash) + FAILEDSTORAGELOCATIONS_FIELD_NUMBER;
hash = (53 * hash) + getFailedStorageLocationsList().hashCode();
}
if (hasLastVolumeFailureDate()) {
hash = (37 * hash) + LASTVOLUMEFAILUREDATE_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getLastVolumeFailureDate());
}
if (hasEstimatedCapacityLostTotal()) {
hash = (37 * hash) + ESTIMATEDCAPACITYLOSTTOTAL_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getEstimatedCapacityLostTotal());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
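// Illustrative sketch only -- not emitted by protoc. It shows the round trip
// between the Builder API below and the static parseFrom overloads above; the
// storage path and the two counters are hypothetical example values.
private static VolumeFailureSummaryProto exampleRoundTrip()
    throws com.google.protobuf.InvalidProtocolBufferException {
  VolumeFailureSummaryProto original = newBuilder()
      .addFailedStorageLocations("/data/1/dfs/dn") // hypothetical failed volume path
      .setLastVolumeFailureDate(1500000000000L)    // milliseconds since epoch (example)
      .setEstimatedCapacityLostTotal(1073741824L)  // 1 GiB lost (example)
      .build();
  // Serialize to a ByteString and parse it back through PARSER.
  return parseFrom(original.toByteString());
}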
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.VolumeFailureSummaryProto}
*
*
**
* failedStorageLocations - storage locations that have failed
* lastVolumeFailureDate - date/time of last volume failure
* estimatedCapacityLost - estimate of total capacity lost due to volume failures
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_VolumeFailureSummaryProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_VolumeFailureSummaryProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
failedStorageLocations_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000001);
lastVolumeFailureDate_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
estimatedCapacityLostTotal_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_VolumeFailureSummaryProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
failedStorageLocations_ = new com.google.protobuf.UnmodifiableLazyStringList(
failedStorageLocations_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.failedStorageLocations_ = failedStorageLocations_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000001;
}
result.lastVolumeFailureDate_ = lastVolumeFailureDate_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000002;
}
result.estimatedCapacityLostTotal_ = estimatedCapacityLostTotal_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.getDefaultInstance()) return this;
if (!other.failedStorageLocations_.isEmpty()) {
if (failedStorageLocations_.isEmpty()) {
failedStorageLocations_ = other.failedStorageLocations_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureFailedStorageLocationsIsMutable();
failedStorageLocations_.addAll(other.failedStorageLocations_);
}
onChanged();
}
if (other.hasLastVolumeFailureDate()) {
setLastVolumeFailureDate(other.getLastVolumeFailureDate());
}
if (other.hasEstimatedCapacityLostTotal()) {
setEstimatedCapacityLostTotal(other.getEstimatedCapacityLostTotal());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasLastVolumeFailureDate()) {
return false;
}
if (!hasEstimatedCapacityLostTotal()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// repeated string failedStorageLocations = 1;
private com.google.protobuf.LazyStringList failedStorageLocations_ = com.google.protobuf.LazyStringArrayList.EMPTY;
private void ensureFailedStorageLocationsIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
failedStorageLocations_ = new com.google.protobuf.LazyStringArrayList(failedStorageLocations_);
bitField0_ |= 0x00000001;
}
}
/**
* repeated string failedStorageLocations = 1;
*/
public java.util.List<java.lang.String>
getFailedStorageLocationsList() {
return java.util.Collections.unmodifiableList(failedStorageLocations_);
}
/**
* repeated string failedStorageLocations = 1;
*/
public int getFailedStorageLocationsCount() {
return failedStorageLocations_.size();
}
/**
* repeated string failedStorageLocations = 1;
*/
public java.lang.String getFailedStorageLocations(int index) {
return failedStorageLocations_.get(index);
}
/**
* repeated string failedStorageLocations = 1;
*/
public com.google.protobuf.ByteString
getFailedStorageLocationsBytes(int index) {
return failedStorageLocations_.getByteString(index);
}
/**
* repeated string failedStorageLocations = 1;
*/
public Builder setFailedStorageLocations(
int index, java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureFailedStorageLocationsIsMutable();
failedStorageLocations_.set(index, value);
onChanged();
return this;
}
/**
* repeated string failedStorageLocations = 1;
*/
public Builder addFailedStorageLocations(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureFailedStorageLocationsIsMutable();
failedStorageLocations_.add(value);
onChanged();
return this;
}
/**
* repeated string failedStorageLocations = 1;
*/
public Builder addAllFailedStorageLocations(
java.lang.Iterable<java.lang.String> values) {
ensureFailedStorageLocationsIsMutable();
super.addAll(values, failedStorageLocations_);
onChanged();
return this;
}
/**
* repeated string failedStorageLocations = 1;
*/
public Builder clearFailedStorageLocations() {
failedStorageLocations_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
return this;
}
/**
* repeated string failedStorageLocations = 1;
*/
public Builder addFailedStorageLocationsBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
ensureFailedStorageLocationsIsMutable();
failedStorageLocations_.add(value);
onChanged();
return this;
}
// required uint64 lastVolumeFailureDate = 2;
private long lastVolumeFailureDate_ ;
/**
* required uint64 lastVolumeFailureDate = 2;
*/
public boolean hasLastVolumeFailureDate() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 lastVolumeFailureDate = 2;
*/
public long getLastVolumeFailureDate() {
return lastVolumeFailureDate_;
}
/**
* required uint64 lastVolumeFailureDate = 2;
*/
public Builder setLastVolumeFailureDate(long value) {
bitField0_ |= 0x00000002;
lastVolumeFailureDate_ = value;
onChanged();
return this;
}
/**
* required uint64 lastVolumeFailureDate = 2;
*/
public Builder clearLastVolumeFailureDate() {
bitField0_ = (bitField0_ & ~0x00000002);
lastVolumeFailureDate_ = 0L;
onChanged();
return this;
}
// required uint64 estimatedCapacityLostTotal = 3;
private long estimatedCapacityLostTotal_ ;
/**
* required uint64 estimatedCapacityLostTotal = 3;
*/
public boolean hasEstimatedCapacityLostTotal() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 estimatedCapacityLostTotal = 3;
*/
public long getEstimatedCapacityLostTotal() {
return estimatedCapacityLostTotal_;
}
/**
* required uint64 estimatedCapacityLostTotal = 3;
*/
public Builder setEstimatedCapacityLostTotal(long value) {
bitField0_ |= 0x00000004;
estimatedCapacityLostTotal_ = value;
onChanged();
return this;
}
/**
* required uint64 estimatedCapacityLostTotal = 3;
*/
public Builder clearEstimatedCapacityLostTotal() {
bitField0_ = (bitField0_ & ~0x00000004);
estimatedCapacityLostTotal_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.VolumeFailureSummaryProto)
}
static {
defaultInstance = new VolumeFailureSummaryProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.VolumeFailureSummaryProto)
}
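// Illustrative sketch only -- not generated by protoc. Per the message comment
// above, lastVolumeFailureDate and estimatedCapacityLostTotal are required, so
// Builder.build() throws an UninitializedMessageException if either is unset,
// while the repeated failedStorageLocations list may be empty. The path below
// is hypothetical.
private static VolumeFailureSummaryProto exampleVolumeFailureSummary() {
  return VolumeFailureSummaryProto.newBuilder()
      .addFailedStorageLocations("/data/2/dfs/dn") // repeated: zero or more entries
      .setLastVolumeFailureDate(1500000000000L)    // required
      .setEstimatedCapacityLostTotal(0L)           // required
      .build();                                    // throws if a required field were missing
}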
public interface HeartbeatRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
boolean hasRegistration();
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration();
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder();
// repeated .hadoop.hdfs.StorageReportProto reports = 2;
/**
* repeated .hadoop.hdfs.StorageReportProto reports = 2;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto>
getReportsList();
/**
* repeated .hadoop.hdfs.StorageReportProto reports = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto getReports(int index);
/**
* repeated .hadoop.hdfs.StorageReportProto reports = 2;
*/
int getReportsCount();
/**
* repeated .hadoop.hdfs.StorageReportProto reports = 2;
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder>
getReportsOrBuilderList();
/**
* repeated .hadoop.hdfs.StorageReportProto reports = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder getReportsOrBuilder(
int index);
// optional uint32 xmitsInProgress = 3 [default = 0];
/**
* optional uint32 xmitsInProgress = 3 [default = 0];
*/
boolean hasXmitsInProgress();
/**
* optional uint32 xmitsInProgress = 3 [default = 0];
*/
int getXmitsInProgress();
// optional uint32 xceiverCount = 4 [default = 0];
/**
* optional uint32 xceiverCount = 4 [default = 0];
*/
boolean hasXceiverCount();
/**
* optional uint32 xceiverCount = 4 [default = 0];
*/
int getXceiverCount();
// optional uint32 failedVolumes = 5 [default = 0];
/**
* optional uint32 failedVolumes = 5 [default = 0];
*/
boolean hasFailedVolumes();
/**
* optional uint32 failedVolumes = 5 [default = 0];
*/
int getFailedVolumes();
// optional uint64 cacheCapacity = 6 [default = 0];
/**
* optional uint64 cacheCapacity = 6 [default = 0];
*/
boolean hasCacheCapacity();
/**
* optional uint64 cacheCapacity = 6 [default = 0];
*/
long getCacheCapacity();
// optional uint64 cacheUsed = 7 [default = 0];
/**
* optional uint64 cacheUsed = 7 [default = 0];
*/
boolean hasCacheUsed();
/**
* optional uint64 cacheUsed = 7 [default = 0];
*/
long getCacheUsed();
// optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;
/**
* optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;
*/
boolean hasVolumeFailureSummary();
/**
* optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto getVolumeFailureSummary();
/**
* optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProtoOrBuilder getVolumeFailureSummaryOrBuilder();
// optional bool requestFullBlockReportLease = 9 [default = false];
/**
* optional bool requestFullBlockReportLease = 9 [default = false];
*/
boolean hasRequestFullBlockReportLease();
/**
* optional bool requestFullBlockReportLease = 9 [default = false];
*/
boolean getRequestFullBlockReportLease();
// repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
/**
* repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto>
getSlowPeersList();
/**
* repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto getSlowPeers(int index);
/**
* repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
*/
int getSlowPeersCount();
/**
* repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProtoOrBuilder>
getSlowPeersOrBuilderList();
/**
* repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProtoOrBuilder getSlowPeersOrBuilder(
int index);
// repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
/**
* repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto>
getSlowDisksList();
/**
* repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto getSlowDisks(int index);
/**
* repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
*/
int getSlowDisksCount();
/**
* repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProtoOrBuilder>
getSlowDisksOrBuilderList();
/**
* repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProtoOrBuilder getSlowDisksOrBuilder(
int index);
}
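// Illustrative sketch only -- not generated by protoc. HeartbeatRequestProto and
// its Builder (both defined below) each implement this OrBuilder interface, so
// read-only code can accept either form. A minimal hypothetical helper:
private static int exampleCountSlowNodeReports(HeartbeatRequestProtoOrBuilder heartbeat) {
  // Works on a built message or on a Builder that is still being populated.
  return heartbeat.getSlowPeersCount() + heartbeat.getSlowDisksCount();
}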
/**
* Protobuf type {@code hadoop.hdfs.datanode.HeartbeatRequestProto}
*
*
**
* registration - datanode registration information
* capacity - total storage capacity available at the datanode
* dfsUsed - storage used by HDFS
* remaining - remaining storage available for HDFS
* blockPoolUsed - storage used by the block pool
* xmitsInProgress - number of transfers from this datanode to others
* xceiverCount - number of active transceiver threads
* failedVolumes - number of failed volumes. This is redundant with the
* information included in volumeFailureSummary, but the field is retained
* for backwards compatibility.
* cacheCapacity - total cache capacity available at the datanode
* cacheUsed - amount of cache used
* volumeFailureSummary - info about volume failures
* slowPeers - info about peer DataNodes that are suspected to be slow.
* slowDisks - info about DataNode disks that are suspected to be slow.
*
*/
public static final class HeartbeatRequestProto extends
com.google.protobuf.GeneratedMessage
implements HeartbeatRequestProtoOrBuilder {
// Use HeartbeatRequestProto.newBuilder() to construct.
private HeartbeatRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private HeartbeatRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final HeartbeatRequestProto defaultInstance;
public static HeartbeatRequestProto getDefaultInstance() {
return defaultInstance;
}
public HeartbeatRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private HeartbeatRequestProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = registration_.toBuilder();
}
registration_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(registration_);
registration_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
reports_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto>();
mutable_bitField0_ |= 0x00000002;
}
reports_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.PARSER, extensionRegistry));
break;
}
case 24: {
bitField0_ |= 0x00000002;
xmitsInProgress_ = input.readUInt32();
break;
}
case 32: {
bitField0_ |= 0x00000004;
xceiverCount_ = input.readUInt32();
break;
}
case 40: {
bitField0_ |= 0x00000008;
failedVolumes_ = input.readUInt32();
break;
}
case 48: {
bitField0_ |= 0x00000010;
cacheCapacity_ = input.readUInt64();
break;
}
case 56: {
bitField0_ |= 0x00000020;
cacheUsed_ = input.readUInt64();
break;
}
case 66: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000040) == 0x00000040)) {
subBuilder = volumeFailureSummary_.toBuilder();
}
volumeFailureSummary_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(volumeFailureSummary_);
volumeFailureSummary_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000040;
break;
}
case 72: {
bitField0_ |= 0x00000080;
requestFullBlockReportLease_ = input.readBool();
break;
}
case 82: {
if (!((mutable_bitField0_ & 0x00000200) == 0x00000200)) {
slowPeers_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto>();
mutable_bitField0_ |= 0x00000200;
}
slowPeers_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.PARSER, extensionRegistry));
break;
}
case 90: {
if (!((mutable_bitField0_ & 0x00000400) == 0x00000400)) {
slowDisks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto>();
mutable_bitField0_ |= 0x00000400;
}
slowDisks_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.PARSER, extensionRegistry));
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
reports_ = java.util.Collections.unmodifiableList(reports_);
}
if (((mutable_bitField0_ & 0x00000200) == 0x00000200)) {
slowPeers_ = java.util.Collections.unmodifiableList(slowPeers_);
}
if (((mutable_bitField0_ & 0x00000400) == 0x00000400)) {
slowDisks_ = java.util.Collections.unmodifiableList(slowDisks_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_HeartbeatRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_HeartbeatRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.Builder.class);
}
public static com.google.protobuf.Parser<HeartbeatRequestProto> PARSER =
new com.google.protobuf.AbstractParser<HeartbeatRequestProto>() {
public HeartbeatRequestProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new HeartbeatRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<HeartbeatRequestProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
public static final int REGISTRATION_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_;
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public boolean hasRegistration() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
return registration_;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
return registration_;
}
// repeated .hadoop.hdfs.StorageReportProto reports = 2;
public static final int REPORTS_FIELD_NUMBER = 2;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto> reports_;
/**
* repeated .hadoop.hdfs.StorageReportProto reports = 2;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto> getReportsList() {
return reports_;
}
/**
* repeated .hadoop.hdfs.StorageReportProto reports = 2;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder>
getReportsOrBuilderList() {
return reports_;
}
/**
* repeated .hadoop.hdfs.StorageReportProto reports = 2;
*/
public int getReportsCount() {
return reports_.size();
}
/**
* repeated .hadoop.hdfs.StorageReportProto reports = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto getReports(int index) {
return reports_.get(index);
}
/**
* repeated .hadoop.hdfs.StorageReportProto reports = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder getReportsOrBuilder(
int index) {
return reports_.get(index);
}
// optional uint32 xmitsInProgress = 3 [default = 0];
public static final int XMITSINPROGRESS_FIELD_NUMBER = 3;
private int xmitsInProgress_;
/**
* optional uint32 xmitsInProgress = 3 [default = 0];
*/
public boolean hasXmitsInProgress() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional uint32 xmitsInProgress = 3 [default = 0];
*/
public int getXmitsInProgress() {
return xmitsInProgress_;
}
// optional uint32 xceiverCount = 4 [default = 0];
public static final int XCEIVERCOUNT_FIELD_NUMBER = 4;
private int xceiverCount_;
/**
* optional uint32 xceiverCount = 4 [default = 0];
*/
public boolean hasXceiverCount() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional uint32 xceiverCount = 4 [default = 0];
*/
public int getXceiverCount() {
return xceiverCount_;
}
// optional uint32 failedVolumes = 5 [default = 0];
public static final int FAILEDVOLUMES_FIELD_NUMBER = 5;
private int failedVolumes_;
/**
* optional uint32 failedVolumes = 5 [default = 0];
*/
public boolean hasFailedVolumes() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional uint32 failedVolumes = 5 [default = 0];
*/
public int getFailedVolumes() {
return failedVolumes_;
}
// optional uint64 cacheCapacity = 6 [default = 0];
public static final int CACHECAPACITY_FIELD_NUMBER = 6;
private long cacheCapacity_;
/**
* optional uint64 cacheCapacity = 6 [default = 0];
*/
public boolean hasCacheCapacity() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional uint64 cacheCapacity = 6 [default = 0];
*/
public long getCacheCapacity() {
return cacheCapacity_;
}
// optional uint64 cacheUsed = 7 [default = 0];
public static final int CACHEUSED_FIELD_NUMBER = 7;
private long cacheUsed_;
/**
* optional uint64 cacheUsed = 7 [default = 0];
*/
public boolean hasCacheUsed() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional uint64 cacheUsed = 7 [default = 0];
*/
public long getCacheUsed() {
return cacheUsed_;
}
// optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;
public static final int VOLUMEFAILURESUMMARY_FIELD_NUMBER = 8;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto volumeFailureSummary_;
/**
* optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;
*/
public boolean hasVolumeFailureSummary() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto getVolumeFailureSummary() {
return volumeFailureSummary_;
}
/**
* optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProtoOrBuilder getVolumeFailureSummaryOrBuilder() {
return volumeFailureSummary_;
}
// optional bool requestFullBlockReportLease = 9 [default = false];
public static final int REQUESTFULLBLOCKREPORTLEASE_FIELD_NUMBER = 9;
private boolean requestFullBlockReportLease_;
/**
* optional bool requestFullBlockReportLease = 9 [default = false];
*/
public boolean hasRequestFullBlockReportLease() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* optional bool requestFullBlockReportLease = 9 [default = false];
*/
public boolean getRequestFullBlockReportLease() {
return requestFullBlockReportLease_;
}
// repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
public static final int SLOWPEERS_FIELD_NUMBER = 10;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto> slowPeers_;
/**
* repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto> getSlowPeersList() {
return slowPeers_;
}
/**
* repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProtoOrBuilder>
getSlowPeersOrBuilderList() {
return slowPeers_;
}
/**
* repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
*/
public int getSlowPeersCount() {
return slowPeers_.size();
}
/**
* repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto getSlowPeers(int index) {
return slowPeers_.get(index);
}
/**
* repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProtoOrBuilder getSlowPeersOrBuilder(
int index) {
return slowPeers_.get(index);
}
// repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
public static final int SLOWDISKS_FIELD_NUMBER = 11;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto> slowDisks_;
/**
* repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto> getSlowDisksList() {
return slowDisks_;
}
/**
* repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProtoOrBuilder>
getSlowDisksOrBuilderList() {
return slowDisks_;
}
/**
* repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
*/
public int getSlowDisksCount() {
return slowDisks_.size();
}
/**
* repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto getSlowDisks(int index) {
return slowDisks_.get(index);
}
/**
* repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProtoOrBuilder getSlowDisksOrBuilder(
int index) {
return slowDisks_.get(index);
}
private void initFields() {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
reports_ = java.util.Collections.emptyList();
xmitsInProgress_ = 0;
xceiverCount_ = 0;
failedVolumes_ = 0;
cacheCapacity_ = 0L;
cacheUsed_ = 0L;
volumeFailureSummary_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.getDefaultInstance();
requestFullBlockReportLease_ = false;
slowPeers_ = java.util.Collections.emptyList();
slowDisks_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasRegistration()) {
memoizedIsInitialized = 0;
return false;
}
if (!getRegistration().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getReportsCount(); i++) {
if (!getReports(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasVolumeFailureSummary()) {
if (!getVolumeFailureSummary().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, registration_);
}
for (int i = 0; i < reports_.size(); i++) {
output.writeMessage(2, reports_.get(i));
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt32(3, xmitsInProgress_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt32(4, xceiverCount_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt32(5, failedVolumes_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeUInt64(6, cacheCapacity_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeUInt64(7, cacheUsed_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeMessage(8, volumeFailureSummary_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
output.writeBool(9, requestFullBlockReportLease_);
}
for (int i = 0; i < slowPeers_.size(); i++) {
output.writeMessage(10, slowPeers_.get(i));
}
for (int i = 0; i < slowDisks_.size(); i++) {
output.writeMessage(11, slowDisks_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, registration_);
}
for (int i = 0; i < reports_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, reports_.get(i));
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(3, xmitsInProgress_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(4, xceiverCount_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(5, failedVolumes_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(6, cacheCapacity_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(7, cacheUsed_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(8, volumeFailureSummary_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(9, requestFullBlockReportLease_);
}
for (int i = 0; i < slowPeers_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(10, slowPeers_.get(i));
}
for (int i = 0; i < slowDisks_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(11, slowDisks_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto) obj;
boolean result = true;
result = result && (hasRegistration() == other.hasRegistration());
if (hasRegistration()) {
result = result && getRegistration()
.equals(other.getRegistration());
}
result = result && getReportsList()
.equals(other.getReportsList());
result = result && (hasXmitsInProgress() == other.hasXmitsInProgress());
if (hasXmitsInProgress()) {
result = result && (getXmitsInProgress()
== other.getXmitsInProgress());
}
result = result && (hasXceiverCount() == other.hasXceiverCount());
if (hasXceiverCount()) {
result = result && (getXceiverCount()
== other.getXceiverCount());
}
result = result && (hasFailedVolumes() == other.hasFailedVolumes());
if (hasFailedVolumes()) {
result = result && (getFailedVolumes()
== other.getFailedVolumes());
}
result = result && (hasCacheCapacity() == other.hasCacheCapacity());
if (hasCacheCapacity()) {
result = result && (getCacheCapacity()
== other.getCacheCapacity());
}
result = result && (hasCacheUsed() == other.hasCacheUsed());
if (hasCacheUsed()) {
result = result && (getCacheUsed()
== other.getCacheUsed());
}
result = result && (hasVolumeFailureSummary() == other.hasVolumeFailureSummary());
if (hasVolumeFailureSummary()) {
result = result && getVolumeFailureSummary()
.equals(other.getVolumeFailureSummary());
}
result = result && (hasRequestFullBlockReportLease() == other.hasRequestFullBlockReportLease());
if (hasRequestFullBlockReportLease()) {
result = result && (getRequestFullBlockReportLease()
== other.getRequestFullBlockReportLease());
}
result = result && getSlowPeersList()
.equals(other.getSlowPeersList());
result = result && getSlowDisksList()
.equals(other.getSlowDisksList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasRegistration()) {
hash = (37 * hash) + REGISTRATION_FIELD_NUMBER;
hash = (53 * hash) + getRegistration().hashCode();
}
if (getReportsCount() > 0) {
hash = (37 * hash) + REPORTS_FIELD_NUMBER;
hash = (53 * hash) + getReportsList().hashCode();
}
if (hasXmitsInProgress()) {
hash = (37 * hash) + XMITSINPROGRESS_FIELD_NUMBER;
hash = (53 * hash) + getXmitsInProgress();
}
if (hasXceiverCount()) {
hash = (37 * hash) + XCEIVERCOUNT_FIELD_NUMBER;
hash = (53 * hash) + getXceiverCount();
}
if (hasFailedVolumes()) {
hash = (37 * hash) + FAILEDVOLUMES_FIELD_NUMBER;
hash = (53 * hash) + getFailedVolumes();
}
if (hasCacheCapacity()) {
hash = (37 * hash) + CACHECAPACITY_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getCacheCapacity());
}
if (hasCacheUsed()) {
hash = (37 * hash) + CACHEUSED_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getCacheUsed());
}
if (hasVolumeFailureSummary()) {
hash = (37 * hash) + VOLUMEFAILURESUMMARY_FIELD_NUMBER;
hash = (53 * hash) + getVolumeFailureSummary().hashCode();
}
if (hasRequestFullBlockReportLease()) {
hash = (37 * hash) + REQUESTFULLBLOCKREPORTLEASE_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getRequestFullBlockReportLease());
}
if (getSlowPeersCount() > 0) {
hash = (37 * hash) + SLOWPEERS_FIELD_NUMBER;
hash = (53 * hash) + getSlowPeersList().hashCode();
}
if (getSlowDisksCount() > 0) {
hash = (37 * hash) + SLOWDISKS_FIELD_NUMBER;
hash = (53 * hash) + getSlowDisksList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
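// Illustrative sketch only -- not emitted by protoc. It assembles a heartbeat as
// described in the class comment: the required registration plus one storage
// report per volume, with a few optional counters set to hypothetical example
// values. The Builder setters used here are defined further below in this file.
private static HeartbeatRequestProto exampleHeartbeat(
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration,
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto> reports) {
  return newBuilder()
      .setRegistration(registration)         // required field
      .addAllReports(reports)                // repeated: one report per storage directory
      .setXmitsInProgress(0)                 // optional, defaults to 0
      .setXceiverCount(1)                    // optional, example value
      .setRequestFullBlockReportLease(false) // optional, defaults to false
      .build();
}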
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.HeartbeatRequestProto}
*
*
**
* registration - datanode registration information
* capacity - total storage capacity available at the datanode
* dfsUsed - storage used by HDFS
* remaining - remaining storage available for HDFS
* blockPoolUsed - storage used by the block pool
* xmitsInProgress - number of transfers from this datanode to others
* xceiverCount - number of active transceiver threads
* failedVolumes - number of failed volumes. This is redundant with the
* information included in volumeFailureSummary, but the field is retained
* for backwards compatibility.
* cacheCapacity - total cache capacity available at the datanode
* cacheUsed - amount of cache used
* volumeFailureSummary - info about volume failures
* slowPeers - info about peer DataNodes that are suspected to be slow.
* slowDisks - info about DataNode disks that are suspected to be slow.
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_HeartbeatRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_HeartbeatRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getRegistrationFieldBuilder();
getReportsFieldBuilder();
getVolumeFailureSummaryFieldBuilder();
getSlowPeersFieldBuilder();
getSlowDisksFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (registrationBuilder_ == null) {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
} else {
registrationBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
if (reportsBuilder_ == null) {
reports_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
} else {
reportsBuilder_.clear();
}
xmitsInProgress_ = 0;
bitField0_ = (bitField0_ & ~0x00000004);
xceiverCount_ = 0;
bitField0_ = (bitField0_ & ~0x00000008);
failedVolumes_ = 0;
bitField0_ = (bitField0_ & ~0x00000010);
cacheCapacity_ = 0L;
bitField0_ = (bitField0_ & ~0x00000020);
cacheUsed_ = 0L;
bitField0_ = (bitField0_ & ~0x00000040);
if (volumeFailureSummaryBuilder_ == null) {
volumeFailureSummary_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.getDefaultInstance();
} else {
volumeFailureSummaryBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000080);
requestFullBlockReportLease_ = false;
bitField0_ = (bitField0_ & ~0x00000100);
if (slowPeersBuilder_ == null) {
slowPeers_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000200);
} else {
slowPeersBuilder_.clear();
}
if (slowDisksBuilder_ == null) {
slowDisks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000400);
} else {
slowDisksBuilder_.clear();
}
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_HeartbeatRequestProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (registrationBuilder_ == null) {
result.registration_ = registration_;
} else {
result.registration_ = registrationBuilder_.build();
}
if (reportsBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002)) {
reports_ = java.util.Collections.unmodifiableList(reports_);
bitField0_ = (bitField0_ & ~0x00000002);
}
result.reports_ = reports_;
} else {
result.reports_ = reportsBuilder_.build();
}
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000002;
}
result.xmitsInProgress_ = xmitsInProgress_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000004;
}
result.xceiverCount_ = xceiverCount_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000008;
}
result.failedVolumes_ = failedVolumes_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000010;
}
result.cacheCapacity_ = cacheCapacity_;
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000020;
}
result.cacheUsed_ = cacheUsed_;
if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
to_bitField0_ |= 0x00000040;
}
if (volumeFailureSummaryBuilder_ == null) {
result.volumeFailureSummary_ = volumeFailureSummary_;
} else {
result.volumeFailureSummary_ = volumeFailureSummaryBuilder_.build();
}
if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
to_bitField0_ |= 0x00000080;
}
result.requestFullBlockReportLease_ = requestFullBlockReportLease_;
if (slowPeersBuilder_ == null) {
if (((bitField0_ & 0x00000200) == 0x00000200)) {
slowPeers_ = java.util.Collections.unmodifiableList(slowPeers_);
bitField0_ = (bitField0_ & ~0x00000200);
}
result.slowPeers_ = slowPeers_;
} else {
result.slowPeers_ = slowPeersBuilder_.build();
}
if (slowDisksBuilder_ == null) {
if (((bitField0_ & 0x00000400) == 0x00000400)) {
slowDisks_ = java.util.Collections.unmodifiableList(slowDisks_);
bitField0_ = (bitField0_ & ~0x00000400);
}
result.slowDisks_ = slowDisks_;
} else {
result.slowDisks_ = slowDisksBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.getDefaultInstance()) return this;
if (other.hasRegistration()) {
mergeRegistration(other.getRegistration());
}
if (reportsBuilder_ == null) {
if (!other.reports_.isEmpty()) {
if (reports_.isEmpty()) {
reports_ = other.reports_;
bitField0_ = (bitField0_ & ~0x00000002);
} else {
ensureReportsIsMutable();
reports_.addAll(other.reports_);
}
onChanged();
}
} else {
if (!other.reports_.isEmpty()) {
if (reportsBuilder_.isEmpty()) {
reportsBuilder_.dispose();
reportsBuilder_ = null;
reports_ = other.reports_;
bitField0_ = (bitField0_ & ~0x00000002);
reportsBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getReportsFieldBuilder() : null;
} else {
reportsBuilder_.addAllMessages(other.reports_);
}
}
}
if (other.hasXmitsInProgress()) {
setXmitsInProgress(other.getXmitsInProgress());
}
if (other.hasXceiverCount()) {
setXceiverCount(other.getXceiverCount());
}
if (other.hasFailedVolumes()) {
setFailedVolumes(other.getFailedVolumes());
}
if (other.hasCacheCapacity()) {
setCacheCapacity(other.getCacheCapacity());
}
if (other.hasCacheUsed()) {
setCacheUsed(other.getCacheUsed());
}
if (other.hasVolumeFailureSummary()) {
mergeVolumeFailureSummary(other.getVolumeFailureSummary());
}
if (other.hasRequestFullBlockReportLease()) {
setRequestFullBlockReportLease(other.getRequestFullBlockReportLease());
}
if (slowPeersBuilder_ == null) {
if (!other.slowPeers_.isEmpty()) {
if (slowPeers_.isEmpty()) {
slowPeers_ = other.slowPeers_;
bitField0_ = (bitField0_ & ~0x00000200);
} else {
ensureSlowPeersIsMutable();
slowPeers_.addAll(other.slowPeers_);
}
onChanged();
}
} else {
if (!other.slowPeers_.isEmpty()) {
if (slowPeersBuilder_.isEmpty()) {
slowPeersBuilder_.dispose();
slowPeersBuilder_ = null;
slowPeers_ = other.slowPeers_;
bitField0_ = (bitField0_ & ~0x00000200);
slowPeersBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getSlowPeersFieldBuilder() : null;
} else {
slowPeersBuilder_.addAllMessages(other.slowPeers_);
}
}
}
if (slowDisksBuilder_ == null) {
if (!other.slowDisks_.isEmpty()) {
if (slowDisks_.isEmpty()) {
slowDisks_ = other.slowDisks_;
bitField0_ = (bitField0_ & ~0x00000400);
} else {
ensureSlowDisksIsMutable();
slowDisks_.addAll(other.slowDisks_);
}
onChanged();
}
} else {
if (!other.slowDisks_.isEmpty()) {
if (slowDisksBuilder_.isEmpty()) {
slowDisksBuilder_.dispose();
slowDisksBuilder_ = null;
slowDisks_ = other.slowDisks_;
bitField0_ = (bitField0_ & ~0x00000400);
slowDisksBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getSlowDisksFieldBuilder() : null;
} else {
slowDisksBuilder_.addAllMessages(other.slowDisks_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasRegistration()) {
return false;
}
if (!getRegistration().isInitialized()) {
return false;
}
for (int i = 0; i < getReportsCount(); i++) {
if (!getReports(i).isInitialized()) {
return false;
}
}
if (hasVolumeFailureSummary()) {
if (!getVolumeFailureSummary().isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
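/*
 * Usage note (not part of the generated code): a minimal, hypothetical sketch of
 * exercising this Builder's merge path. mergeFrom(byte[]) is inherited from
 * com.google.protobuf.AbstractMessage.Builder and throws
 * InvalidProtocolBufferException on malformed input; "wireBytes" is an assumed
 * variable holding a serialized HeartbeatRequestProto.
 *
 *   HeartbeatRequestProto.Builder builder = HeartbeatRequestProto.newBuilder();
 *   builder.mergeFrom(wireBytes);                    // merges parsed fields into this builder
 *   HeartbeatRequestProto merged = builder.build();  // throws if required fields are missing
 */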
private int bitField0_;
// required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registrationBuilder_;
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public boolean hasRegistration() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
if (registrationBuilder_ == null) {
return registration_;
} else {
return registrationBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
if (registrationBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
registration_ = value;
onChanged();
} else {
registrationBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public Builder setRegistration(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) {
if (registrationBuilder_ == null) {
registration_ = builderForValue.build();
onChanged();
} else {
registrationBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
if (registrationBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
registration_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) {
registration_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial();
} else {
registration_ = value;
}
onChanged();
} else {
registrationBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public Builder clearRegistration() {
if (registrationBuilder_ == null) {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
onChanged();
} else {
registrationBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistrationBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getRegistrationFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
if (registrationBuilder_ != null) {
return registrationBuilder_.getMessageOrBuilder();
} else {
return registration_;
}
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*
*
* Datanode info
*
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>
getRegistrationFieldBuilder() {
if (registrationBuilder_ == null) {
registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>(
registration_,
getParentForChildren(),
isClean());
registration_ = null;
}
return registrationBuilder_;
}
// repeated .hadoop.hdfs.StorageReportProto reports = 2;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto> reports_ =
java.util.Collections.emptyList();
private void ensureReportsIsMutable() {
if (!((bitField0_ & 0x00000002) == 0x00000002)) {
reports_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto>(reports_);
bitField0_ |= 0x00000002;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder> reportsBuilder_;
/**
* repeated .hadoop.hdfs.StorageReportProto reports = 2;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto> getReportsList() {
if (reportsBuilder_ == null) {
return java.util.Collections.unmodifiableList(reports_);
} else {
return reportsBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.StorageReportProto reports = 2;
*/
public int getReportsCount() {
if (reportsBuilder_ == null) {
return reports_.size();
} else {
return reportsBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.StorageReportProto reports = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto getReports(int index) {
if (reportsBuilder_ == null) {
return reports_.get(index);
} else {
return reportsBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.StorageReportProto reports = 2;
*/
public Builder setReports(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto value) {
if (reportsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureReportsIsMutable();
reports_.set(index, value);
onChanged();
} else {
reportsBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageReportProto reports = 2;
*/
public Builder setReports(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder builderForValue) {
if (reportsBuilder_ == null) {
ensureReportsIsMutable();
reports_.set(index, builderForValue.build());
onChanged();
} else {
reportsBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageReportProto reports = 2;
*/
public Builder addReports(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto value) {
if (reportsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureReportsIsMutable();
reports_.add(value);
onChanged();
} else {
reportsBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageReportProto reports = 2;
*/
public Builder addReports(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto value) {
if (reportsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureReportsIsMutable();
reports_.add(index, value);
onChanged();
} else {
reportsBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageReportProto reports = 2;
*/
public Builder addReports(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder builderForValue) {
if (reportsBuilder_ == null) {
ensureReportsIsMutable();
reports_.add(builderForValue.build());
onChanged();
} else {
reportsBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageReportProto reports = 2;
*/
public Builder addReports(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder builderForValue) {
if (reportsBuilder_ == null) {
ensureReportsIsMutable();
reports_.add(index, builderForValue.build());
onChanged();
} else {
reportsBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageReportProto reports = 2;
*/
public Builder addAllReports(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto> values) {
if (reportsBuilder_ == null) {
ensureReportsIsMutable();
super.addAll(values, reports_);
onChanged();
} else {
reportsBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageReportProto reports = 2;
*/
public Builder clearReports() {
if (reportsBuilder_ == null) {
reports_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
} else {
reportsBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageReportProto reports = 2;
*/
public Builder removeReports(int index) {
if (reportsBuilder_ == null) {
ensureReportsIsMutable();
reports_.remove(index);
onChanged();
} else {
reportsBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageReportProto reports = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder getReportsBuilder(
int index) {
return getReportsFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.StorageReportProto reports = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder getReportsOrBuilder(
int index) {
if (reportsBuilder_ == null) {
return reports_.get(index); } else {
return reportsBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.StorageReportProto reports = 2;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder>
getReportsOrBuilderList() {
if (reportsBuilder_ != null) {
return reportsBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(reports_);
}
}
/**
* repeated .hadoop.hdfs.StorageReportProto reports = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder addReportsBuilder() {
return getReportsFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.StorageReportProto reports = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder addReportsBuilder(
int index) {
return getReportsFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.StorageReportProto reports = 2;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder>
getReportsBuilderList() {
return getReportsFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder>
getReportsFieldBuilder() {
if (reportsBuilder_ == null) {
reportsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder>(
reports_,
((bitField0_ & 0x00000002) == 0x00000002),
getParentForChildren(),
isClean());
reports_ = null;
}
return reportsBuilder_;
}
// optional uint32 xmitsInProgress = 3 [default = 0];
private int xmitsInProgress_ ;
/**
* optional uint32 xmitsInProgress = 3 [default = 0];
*/
public boolean hasXmitsInProgress() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional uint32 xmitsInProgress = 3 [default = 0];
*/
public int getXmitsInProgress() {
return xmitsInProgress_;
}
/**
* optional uint32 xmitsInProgress = 3 [default = 0];
*/
public Builder setXmitsInProgress(int value) {
bitField0_ |= 0x00000004;
xmitsInProgress_ = value;
onChanged();
return this;
}
/**
* optional uint32 xmitsInProgress = 3 [default = 0];
*/
public Builder clearXmitsInProgress() {
bitField0_ = (bitField0_ & ~0x00000004);
xmitsInProgress_ = 0;
onChanged();
return this;
}
// optional uint32 xceiverCount = 4 [default = 0];
private int xceiverCount_ ;
/**
* optional uint32 xceiverCount = 4 [default = 0];
*/
public boolean hasXceiverCount() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional uint32 xceiverCount = 4 [default = 0];
*/
public int getXceiverCount() {
return xceiverCount_;
}
/**
* optional uint32 xceiverCount = 4 [default = 0];
*/
public Builder setXceiverCount(int value) {
bitField0_ |= 0x00000008;
xceiverCount_ = value;
onChanged();
return this;
}
/**
* optional uint32 xceiverCount = 4 [default = 0];
*/
public Builder clearXceiverCount() {
bitField0_ = (bitField0_ & ~0x00000008);
xceiverCount_ = 0;
onChanged();
return this;
}
// optional uint32 failedVolumes = 5 [default = 0];
private int failedVolumes_ ;
/**
* optional uint32 failedVolumes = 5 [default = 0];
*/
public boolean hasFailedVolumes() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional uint32 failedVolumes = 5 [default = 0];
*/
public int getFailedVolumes() {
return failedVolumes_;
}
/**
* optional uint32 failedVolumes = 5 [default = 0];
*/
public Builder setFailedVolumes(int value) {
bitField0_ |= 0x00000010;
failedVolumes_ = value;
onChanged();
return this;
}
/**
* optional uint32 failedVolumes = 5 [default = 0];
*/
public Builder clearFailedVolumes() {
bitField0_ = (bitField0_ & ~0x00000010);
failedVolumes_ = 0;
onChanged();
return this;
}
// optional uint64 cacheCapacity = 6 [default = 0];
private long cacheCapacity_ ;
/**
* optional uint64 cacheCapacity = 6 [default = 0];
*/
public boolean hasCacheCapacity() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional uint64 cacheCapacity = 6 [default = 0];
*/
public long getCacheCapacity() {
return cacheCapacity_;
}
/**
* optional uint64 cacheCapacity = 6 [default = 0];
*/
public Builder setCacheCapacity(long value) {
bitField0_ |= 0x00000020;
cacheCapacity_ = value;
onChanged();
return this;
}
/**
* optional uint64 cacheCapacity = 6 [default = 0];
*/
public Builder clearCacheCapacity() {
bitField0_ = (bitField0_ & ~0x00000020);
cacheCapacity_ = 0L;
onChanged();
return this;
}
// optional uint64 cacheUsed = 7 [default = 0];
private long cacheUsed_ ;
/**
* optional uint64 cacheUsed = 7 [default = 0];
*/
public boolean hasCacheUsed() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional uint64 cacheUsed = 7 [default = 0];
*/
public long getCacheUsed() {
return cacheUsed_;
}
/**
* optional uint64 cacheUsed = 7 [default = 0];
*/
public Builder setCacheUsed(long value) {
bitField0_ |= 0x00000040;
cacheUsed_ = value;
onChanged();
return this;
}
/**
* optional uint64 cacheUsed = 7 [default = 0];
*/
public Builder clearCacheUsed() {
bitField0_ = (bitField0_ & ~0x00000040);
cacheUsed_ = 0L;
onChanged();
return this;
}
// optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto volumeFailureSummary_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProtoOrBuilder> volumeFailureSummaryBuilder_;
/**
* optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;
*/
public boolean hasVolumeFailureSummary() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto getVolumeFailureSummary() {
if (volumeFailureSummaryBuilder_ == null) {
return volumeFailureSummary_;
} else {
return volumeFailureSummaryBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;
*/
public Builder setVolumeFailureSummary(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto value) {
if (volumeFailureSummaryBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
volumeFailureSummary_ = value;
onChanged();
} else {
volumeFailureSummaryBuilder_.setMessage(value);
}
bitField0_ |= 0x00000080;
return this;
}
/**
* optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;
*/
public Builder setVolumeFailureSummary(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.Builder builderForValue) {
if (volumeFailureSummaryBuilder_ == null) {
volumeFailureSummary_ = builderForValue.build();
onChanged();
} else {
volumeFailureSummaryBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000080;
return this;
}
/**
* optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;
*/
public Builder mergeVolumeFailureSummary(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto value) {
if (volumeFailureSummaryBuilder_ == null) {
if (((bitField0_ & 0x00000080) == 0x00000080) &&
volumeFailureSummary_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.getDefaultInstance()) {
volumeFailureSummary_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.newBuilder(volumeFailureSummary_).mergeFrom(value).buildPartial();
} else {
volumeFailureSummary_ = value;
}
onChanged();
} else {
volumeFailureSummaryBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000080;
return this;
}
/**
* optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;
*/
public Builder clearVolumeFailureSummary() {
if (volumeFailureSummaryBuilder_ == null) {
volumeFailureSummary_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.getDefaultInstance();
onChanged();
} else {
volumeFailureSummaryBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000080);
return this;
}
/**
* optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.Builder getVolumeFailureSummaryBuilder() {
bitField0_ |= 0x00000080;
onChanged();
return getVolumeFailureSummaryFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProtoOrBuilder getVolumeFailureSummaryOrBuilder() {
if (volumeFailureSummaryBuilder_ != null) {
return volumeFailureSummaryBuilder_.getMessageOrBuilder();
} else {
return volumeFailureSummary_;
}
}
/**
* optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProtoOrBuilder>
getVolumeFailureSummaryFieldBuilder() {
if (volumeFailureSummaryBuilder_ == null) {
volumeFailureSummaryBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProtoOrBuilder>(
volumeFailureSummary_,
getParentForChildren(),
isClean());
volumeFailureSummary_ = null;
}
return volumeFailureSummaryBuilder_;
}
// optional bool requestFullBlockReportLease = 9 [default = false];
private boolean requestFullBlockReportLease_ ;
/**
* optional bool requestFullBlockReportLease = 9 [default = false];
*/
public boolean hasRequestFullBlockReportLease() {
return ((bitField0_ & 0x00000100) == 0x00000100);
}
/**
* optional bool requestFullBlockReportLease = 9 [default = false];
*/
public boolean getRequestFullBlockReportLease() {
return requestFullBlockReportLease_;
}
/**
* optional bool requestFullBlockReportLease = 9 [default = false];
*/
public Builder setRequestFullBlockReportLease(boolean value) {
bitField0_ |= 0x00000100;
requestFullBlockReportLease_ = value;
onChanged();
return this;
}
/**
* optional bool requestFullBlockReportLease = 9 [default = false];
*/
public Builder clearRequestFullBlockReportLease() {
bitField0_ = (bitField0_ & ~0x00000100);
requestFullBlockReportLease_ = false;
onChanged();
return this;
}
// repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto> slowPeers_ =
java.util.Collections.emptyList();
private void ensureSlowPeersIsMutable() {
if (!((bitField0_ & 0x00000200) == 0x00000200)) {
slowPeers_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto>(slowPeers_);
bitField0_ |= 0x00000200;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProtoOrBuilder> slowPeersBuilder_;
/**
* repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto> getSlowPeersList() {
if (slowPeersBuilder_ == null) {
return java.util.Collections.unmodifiableList(slowPeers_);
} else {
return slowPeersBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
*/
public int getSlowPeersCount() {
if (slowPeersBuilder_ == null) {
return slowPeers_.size();
} else {
return slowPeersBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto getSlowPeers(int index) {
if (slowPeersBuilder_ == null) {
return slowPeers_.get(index);
} else {
return slowPeersBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
*/
public Builder setSlowPeers(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto value) {
if (slowPeersBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureSlowPeersIsMutable();
slowPeers_.set(index, value);
onChanged();
} else {
slowPeersBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
*/
public Builder setSlowPeers(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.Builder builderForValue) {
if (slowPeersBuilder_ == null) {
ensureSlowPeersIsMutable();
slowPeers_.set(index, builderForValue.build());
onChanged();
} else {
slowPeersBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
*/
public Builder addSlowPeers(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto value) {
if (slowPeersBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureSlowPeersIsMutable();
slowPeers_.add(value);
onChanged();
} else {
slowPeersBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
*/
public Builder addSlowPeers(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto value) {
if (slowPeersBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureSlowPeersIsMutable();
slowPeers_.add(index, value);
onChanged();
} else {
slowPeersBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
*/
public Builder addSlowPeers(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.Builder builderForValue) {
if (slowPeersBuilder_ == null) {
ensureSlowPeersIsMutable();
slowPeers_.add(builderForValue.build());
onChanged();
} else {
slowPeersBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
*/
public Builder addSlowPeers(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.Builder builderForValue) {
if (slowPeersBuilder_ == null) {
ensureSlowPeersIsMutable();
slowPeers_.add(index, builderForValue.build());
onChanged();
} else {
slowPeersBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
*/
public Builder addAllSlowPeers(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto> values) {
if (slowPeersBuilder_ == null) {
ensureSlowPeersIsMutable();
super.addAll(values, slowPeers_);
onChanged();
} else {
slowPeersBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
*/
public Builder clearSlowPeers() {
if (slowPeersBuilder_ == null) {
slowPeers_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000200);
onChanged();
} else {
slowPeersBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
*/
public Builder removeSlowPeers(int index) {
if (slowPeersBuilder_ == null) {
ensureSlowPeersIsMutable();
slowPeers_.remove(index);
onChanged();
} else {
slowPeersBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.Builder getSlowPeersBuilder(
int index) {
return getSlowPeersFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProtoOrBuilder getSlowPeersOrBuilder(
int index) {
if (slowPeersBuilder_ == null) {
return slowPeers_.get(index); } else {
return slowPeersBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProtoOrBuilder>
getSlowPeersOrBuilderList() {
if (slowPeersBuilder_ != null) {
return slowPeersBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(slowPeers_);
}
}
/**
* repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.Builder addSlowPeersBuilder() {
return getSlowPeersFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.Builder addSlowPeersBuilder(
int index) {
return getSlowPeersFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.Builder>
getSlowPeersBuilderList() {
return getSlowPeersFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProtoOrBuilder>
getSlowPeersFieldBuilder() {
if (slowPeersBuilder_ == null) {
slowPeersBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProtoOrBuilder>(
slowPeers_,
((bitField0_ & 0x00000200) == 0x00000200),
getParentForChildren(),
isClean());
slowPeers_ = null;
}
return slowPeersBuilder_;
}
// repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto> slowDisks_ =
java.util.Collections.emptyList();
private void ensureSlowDisksIsMutable() {
if (!((bitField0_ & 0x00000400) == 0x00000400)) {
slowDisks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto>(slowDisks_);
bitField0_ |= 0x00000400;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProtoOrBuilder> slowDisksBuilder_;
/**
* repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto> getSlowDisksList() {
if (slowDisksBuilder_ == null) {
return java.util.Collections.unmodifiableList(slowDisks_);
} else {
return slowDisksBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
*/
public int getSlowDisksCount() {
if (slowDisksBuilder_ == null) {
return slowDisks_.size();
} else {
return slowDisksBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto getSlowDisks(int index) {
if (slowDisksBuilder_ == null) {
return slowDisks_.get(index);
} else {
return slowDisksBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
*/
public Builder setSlowDisks(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto value) {
if (slowDisksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureSlowDisksIsMutable();
slowDisks_.set(index, value);
onChanged();
} else {
slowDisksBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
*/
public Builder setSlowDisks(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.Builder builderForValue) {
if (slowDisksBuilder_ == null) {
ensureSlowDisksIsMutable();
slowDisks_.set(index, builderForValue.build());
onChanged();
} else {
slowDisksBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
*/
public Builder addSlowDisks(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto value) {
if (slowDisksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureSlowDisksIsMutable();
slowDisks_.add(value);
onChanged();
} else {
slowDisksBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
*/
public Builder addSlowDisks(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto value) {
if (slowDisksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureSlowDisksIsMutable();
slowDisks_.add(index, value);
onChanged();
} else {
slowDisksBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
*/
public Builder addSlowDisks(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.Builder builderForValue) {
if (slowDisksBuilder_ == null) {
ensureSlowDisksIsMutable();
slowDisks_.add(builderForValue.build());
onChanged();
} else {
slowDisksBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
*/
public Builder addSlowDisks(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.Builder builderForValue) {
if (slowDisksBuilder_ == null) {
ensureSlowDisksIsMutable();
slowDisks_.add(index, builderForValue.build());
onChanged();
} else {
slowDisksBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
*/
public Builder addAllSlowDisks(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto> values) {
if (slowDisksBuilder_ == null) {
ensureSlowDisksIsMutable();
super.addAll(values, slowDisks_);
onChanged();
} else {
slowDisksBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
*/
public Builder clearSlowDisks() {
if (slowDisksBuilder_ == null) {
slowDisks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000400);
onChanged();
} else {
slowDisksBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
*/
public Builder removeSlowDisks(int index) {
if (slowDisksBuilder_ == null) {
ensureSlowDisksIsMutable();
slowDisks_.remove(index);
onChanged();
} else {
slowDisksBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.Builder getSlowDisksBuilder(
int index) {
return getSlowDisksFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProtoOrBuilder getSlowDisksOrBuilder(
int index) {
if (slowDisksBuilder_ == null) {
return slowDisks_.get(index); } else {
return slowDisksBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProtoOrBuilder>
getSlowDisksOrBuilderList() {
if (slowDisksBuilder_ != null) {
return slowDisksBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(slowDisks_);
}
}
/**
* repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.Builder addSlowDisksBuilder() {
return getSlowDisksFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.Builder addSlowDisksBuilder(
int index) {
return getSlowDisksFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.Builder>
getSlowDisksBuilderList() {
return getSlowDisksFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProtoOrBuilder>
getSlowDisksFieldBuilder() {
if (slowDisksBuilder_ == null) {
slowDisksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProtoOrBuilder>(
slowDisks_,
((bitField0_ & 0x00000400) == 0x00000400),
getParentForChildren(),
isClean());
slowDisks_ = null;
}
return slowDisksBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.HeartbeatRequestProto)
}
static {
defaultInstance = new HeartbeatRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.HeartbeatRequestProto)
}
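/*
 * Usage note (not part of the generated code): a hedged sketch of how datanode-side
 * code might assemble and serialize a heartbeat request with the builder API above.
 * "registration" and "report" are assumed, fully-populated messages; their required
 * sub-fields are elided here for brevity.
 *
 *   HeartbeatRequestProto request = HeartbeatRequestProto.newBuilder()
 *       .setRegistration(registration)   // required DatanodeRegistrationProto
 *       .addReports(report)              // repeated StorageReportProto, one per storage
 *       .setXmitsInProgress(0)
 *       .setXceiverCount(4)
 *       .setCacheCapacity(0L)
 *       .setRequestFullBlockReportLease(false)
 *       .build();                        // throws UninitializedMessageException if required fields are missing
 *   byte[] wireBytes = request.toByteArray();
 */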
public interface HeartbeatResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
/**
* repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
*
*
* Returned commands can be null
*
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto>
getCmdsList();
/**
* repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
*
*
* Returned commands can be null
*
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmds(int index);
/**
* repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
*
*
* Returned commands can be null
*
*/
int getCmdsCount();
/**
* repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
*
*
* Returned commands can be null
*
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder>
getCmdsOrBuilderList();
/**
* repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
*
*
* Returned commands can be null
*
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdsOrBuilder(
int index);
// required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;
/**
* required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;
*/
boolean hasHaStatus();
/**
* required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto getHaStatus();
/**
* required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProtoOrBuilder getHaStatusOrBuilder();
// optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;
*/
boolean hasRollingUpgradeStatus();
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto getRollingUpgradeStatus();
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder getRollingUpgradeStatusOrBuilder();
// optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;
*/
boolean hasRollingUpgradeStatusV2();
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto getRollingUpgradeStatusV2();
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder getRollingUpgradeStatusV2OrBuilder();
// optional uint64 fullBlockReportLeaseId = 5 [default = 0];
/**
* optional uint64 fullBlockReportLeaseId = 5 [default = 0];
*/
boolean hasFullBlockReportLeaseId();
/**
* optional uint64 fullBlockReportLeaseId = 5 [default = 0];
*/
long getFullBlockReportLeaseId();
}
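/*
 * Usage note (not part of the generated code): a hedged sketch of reading a parsed
 * heartbeat response through the accessors declared above. parseFrom(byte[]) is the
 * standard protobuf-generated entry point; "wireBytes" is an assumed serialized
 * HeartbeatResponseProto.
 *
 *   HeartbeatResponseProto response = HeartbeatResponseProto.parseFrom(wireBytes);
 *   for (DatanodeCommandProto cmd : response.getCmdsList()) {
 *     // dispatch each namenode command to the datanode's command processor
 *   }
 *   if (response.hasHaStatus()) {
 *     // haStatus is a required field, so it is present in any valid message
 *   }
 *   long leaseId = response.getFullBlockReportLeaseId();  // 0 when no lease was granted
 */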
/**
* Protobuf type {@code hadoop.hdfs.datanode.HeartbeatResponseProto}
*
*
**
* cmds - Commands from namenode to datanode.
* haStatus - Status (from an HA perspective) of the NN sending this response
*
*/
public static final class HeartbeatResponseProto extends
com.google.protobuf.GeneratedMessage
implements HeartbeatResponseProtoOrBuilder {
// Use HeartbeatResponseProto.newBuilder() to construct.
private HeartbeatResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private HeartbeatResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final HeartbeatResponseProto defaultInstance;
public static HeartbeatResponseProto getDefaultInstance() {
return defaultInstance;
}
public HeartbeatResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private HeartbeatResponseProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
cmds_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto>();
mutable_bitField0_ |= 0x00000001;
}
cmds_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.PARSER, extensionRegistry));
break;
}
case 18: {
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = haStatus_.toBuilder();
}
haStatus_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(haStatus_);
haStatus_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 26: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
subBuilder = rollingUpgradeStatus_.toBuilder();
}
rollingUpgradeStatus_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(rollingUpgradeStatus_);
rollingUpgradeStatus_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
case 34: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) == 0x00000004)) {
subBuilder = rollingUpgradeStatusV2_.toBuilder();
}
rollingUpgradeStatusV2_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(rollingUpgradeStatusV2_);
rollingUpgradeStatusV2_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
case 40: {
bitField0_ |= 0x00000008;
fullBlockReportLeaseId_ = input.readUInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
cmds_ = java.util.Collections.unmodifiableList(cmds_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_HeartbeatResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_HeartbeatResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.Builder.class);
}
public static com.google.protobuf.Parser<HeartbeatResponseProto> PARSER =
new com.google.protobuf.AbstractParser<HeartbeatResponseProto>() {
public HeartbeatResponseProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new HeartbeatResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<HeartbeatResponseProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
public static final int CMDS_FIELD_NUMBER = 1;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto> cmds_;
/**
* repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
*
*
* Returned commands can be null
*
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto> getCmdsList() {
return cmds_;
}
/**
* repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
*
*
* Returned commands can be null
*
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder>
getCmdsOrBuilderList() {
return cmds_;
}
/**
* repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
*
*
* Returned commands can be null
*
*/
public int getCmdsCount() {
return cmds_.size();
}
/**
* repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
*
*
* Returned commands can be null
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmds(int index) {
return cmds_.get(index);
}
/**
* repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
*
*
* Returned commands can be null
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdsOrBuilder(
int index) {
return cmds_.get(index);
}
// required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;
public static final int HASTATUS_FIELD_NUMBER = 2;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto haStatus_;
/**
* required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;
*/
public boolean hasHaStatus() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto getHaStatus() {
return haStatus_;
}
/**
* required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProtoOrBuilder getHaStatusOrBuilder() {
return haStatus_;
}
// optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;
public static final int ROLLINGUPGRADESTATUS_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto rollingUpgradeStatus_;
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;
*/
public boolean hasRollingUpgradeStatus() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto getRollingUpgradeStatus() {
return rollingUpgradeStatus_;
}
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder getRollingUpgradeStatusOrBuilder() {
return rollingUpgradeStatus_;
}
// optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;
public static final int ROLLINGUPGRADESTATUSV2_FIELD_NUMBER = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto rollingUpgradeStatusV2_;
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;
*/
public boolean hasRollingUpgradeStatusV2() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto getRollingUpgradeStatusV2() {
return rollingUpgradeStatusV2_;
}
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder getRollingUpgradeStatusV2OrBuilder() {
return rollingUpgradeStatusV2_;
}
// optional uint64 fullBlockReportLeaseId = 5 [default = 0];
public static final int FULLBLOCKREPORTLEASEID_FIELD_NUMBER = 5;
private long fullBlockReportLeaseId_;
/**
* optional uint64 fullBlockReportLeaseId = 5 [default = 0];
*/
public boolean hasFullBlockReportLeaseId() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional uint64 fullBlockReportLeaseId = 5 [default = 0];
*/
public long getFullBlockReportLeaseId() {
return fullBlockReportLeaseId_;
}
private void initFields() {
cmds_ = java.util.Collections.emptyList();
haStatus_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.getDefaultInstance();
rollingUpgradeStatus_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance();
rollingUpgradeStatusV2_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance();
fullBlockReportLeaseId_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasHaStatus()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getCmdsCount(); i++) {
if (!getCmds(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (!getHaStatus().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
if (hasRollingUpgradeStatus()) {
if (!getRollingUpgradeStatus().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasRollingUpgradeStatusV2()) {
if (!getRollingUpgradeStatusV2().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < cmds_.size(); i++) {
output.writeMessage(1, cmds_.get(i));
}
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(2, haStatus_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeMessage(3, rollingUpgradeStatus_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeMessage(4, rollingUpgradeStatusV2_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt64(5, fullBlockReportLeaseId_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
for (int i = 0; i < cmds_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, cmds_.get(i));
}
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, haStatus_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, rollingUpgradeStatus_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(4, rollingUpgradeStatusV2_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(5, fullBlockReportLeaseId_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
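// Note: getSerializedSize() adds up per-field wire sizes and caches the total in
// memoizedSerializedSize for the later writeTo() call. As a rough worked example,
// fullBlockReportLeaseId (field 5, varint wire type) costs
//   computeUInt64Size(5, v) = 1 tag byte ((5 << 3) | 0 = 40) + the varint length of v,
// i.e. 2 bytes total for any lease id below 128.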
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto) obj;
boolean result = true;
result = result && getCmdsList()
.equals(other.getCmdsList());
result = result && (hasHaStatus() == other.hasHaStatus());
if (hasHaStatus()) {
result = result && getHaStatus()
.equals(other.getHaStatus());
}
result = result && (hasRollingUpgradeStatus() == other.hasRollingUpgradeStatus());
if (hasRollingUpgradeStatus()) {
result = result && getRollingUpgradeStatus()
.equals(other.getRollingUpgradeStatus());
}
result = result && (hasRollingUpgradeStatusV2() == other.hasRollingUpgradeStatusV2());
if (hasRollingUpgradeStatusV2()) {
result = result && getRollingUpgradeStatusV2()
.equals(other.getRollingUpgradeStatusV2());
}
result = result && (hasFullBlockReportLeaseId() == other.hasFullBlockReportLeaseId());
if (hasFullBlockReportLeaseId()) {
result = result && (getFullBlockReportLeaseId()
== other.getFullBlockReportLeaseId());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (getCmdsCount() > 0) {
hash = (37 * hash) + CMDS_FIELD_NUMBER;
hash = (53 * hash) + getCmdsList().hashCode();
}
if (hasHaStatus()) {
hash = (37 * hash) + HASTATUS_FIELD_NUMBER;
hash = (53 * hash) + getHaStatus().hashCode();
}
if (hasRollingUpgradeStatus()) {
hash = (37 * hash) + ROLLINGUPGRADESTATUS_FIELD_NUMBER;
hash = (53 * hash) + getRollingUpgradeStatus().hashCode();
}
if (hasRollingUpgradeStatusV2()) {
hash = (37 * hash) + ROLLINGUPGRADESTATUSV2_FIELD_NUMBER;
hash = (53 * hash) + getRollingUpgradeStatusV2().hashCode();
}
if (hasFullBlockReportLeaseId()) {
hash = (37 * hash) + FULLBLOCKREPORTLEASEID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getFullBlockReportLeaseId());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.HeartbeatResponseProto}
*
*
**
* cmds - Commands from namenode to datanode.
* haStatus - Status (from an HA perspective) of the NN sending this response
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_HeartbeatResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_HeartbeatResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getCmdsFieldBuilder();
getHaStatusFieldBuilder();
getRollingUpgradeStatusFieldBuilder();
getRollingUpgradeStatusV2FieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (cmdsBuilder_ == null) {
cmds_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
} else {
cmdsBuilder_.clear();
}
if (haStatusBuilder_ == null) {
haStatus_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.getDefaultInstance();
} else {
haStatusBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
if (rollingUpgradeStatusBuilder_ == null) {
rollingUpgradeStatus_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance();
} else {
rollingUpgradeStatusBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
if (rollingUpgradeStatusV2Builder_ == null) {
rollingUpgradeStatusV2_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance();
} else {
rollingUpgradeStatusV2Builder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
fullBlockReportLeaseId_ = 0L;
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_HeartbeatResponseProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (cmdsBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001)) {
cmds_ = java.util.Collections.unmodifiableList(cmds_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.cmds_ = cmds_;
} else {
result.cmds_ = cmdsBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000001;
}
if (haStatusBuilder_ == null) {
result.haStatus_ = haStatus_;
} else {
result.haStatus_ = haStatusBuilder_.build();
}
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000002;
}
if (rollingUpgradeStatusBuilder_ == null) {
result.rollingUpgradeStatus_ = rollingUpgradeStatus_;
} else {
result.rollingUpgradeStatus_ = rollingUpgradeStatusBuilder_.build();
}
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000004;
}
if (rollingUpgradeStatusV2Builder_ == null) {
result.rollingUpgradeStatusV2_ = rollingUpgradeStatusV2_;
} else {
result.rollingUpgradeStatusV2_ = rollingUpgradeStatusV2Builder_.build();
}
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000008;
}
result.fullBlockReportLeaseId_ = fullBlockReportLeaseId_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance()) return this;
if (cmdsBuilder_ == null) {
if (!other.cmds_.isEmpty()) {
if (cmds_.isEmpty()) {
cmds_ = other.cmds_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureCmdsIsMutable();
cmds_.addAll(other.cmds_);
}
onChanged();
}
} else {
if (!other.cmds_.isEmpty()) {
if (cmdsBuilder_.isEmpty()) {
cmdsBuilder_.dispose();
cmdsBuilder_ = null;
cmds_ = other.cmds_;
bitField0_ = (bitField0_ & ~0x00000001);
cmdsBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getCmdsFieldBuilder() : null;
} else {
cmdsBuilder_.addAllMessages(other.cmds_);
}
}
}
if (other.hasHaStatus()) {
mergeHaStatus(other.getHaStatus());
}
if (other.hasRollingUpgradeStatus()) {
mergeRollingUpgradeStatus(other.getRollingUpgradeStatus());
}
if (other.hasRollingUpgradeStatusV2()) {
mergeRollingUpgradeStatusV2(other.getRollingUpgradeStatusV2());
}
if (other.hasFullBlockReportLeaseId()) {
setFullBlockReportLeaseId(other.getFullBlockReportLeaseId());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasHaStatus()) {
return false;
}
for (int i = 0; i < getCmdsCount(); i++) {
if (!getCmds(i).isInitialized()) {
return false;
}
}
if (!getHaStatus().isInitialized()) {
return false;
}
if (hasRollingUpgradeStatus()) {
if (!getRollingUpgradeStatus().isInitialized()) {
return false;
}
}
if (hasRollingUpgradeStatusV2()) {
if (!getRollingUpgradeStatusV2().isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto> cmds_ =
java.util.Collections.emptyList();
private void ensureCmdsIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
cmds_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto>(cmds_);
bitField0_ |= 0x00000001;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder> cmdsBuilder_;
/**
* repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
*
*
* Returned commands can be null
*
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto> getCmdsList() {
if (cmdsBuilder_ == null) {
return java.util.Collections.unmodifiableList(cmds_);
} else {
return cmdsBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
*
*
* Returned commands can be null
*
*/
public int getCmdsCount() {
if (cmdsBuilder_ == null) {
return cmds_.size();
} else {
return cmdsBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
*
*
* Returned commands can be null
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmds(int index) {
if (cmdsBuilder_ == null) {
return cmds_.get(index);
} else {
return cmdsBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
*
*
* Returned commands can be null
*
*/
public Builder setCmds(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) {
if (cmdsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureCmdsIsMutable();
cmds_.set(index, value);
onChanged();
} else {
cmdsBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
*
*
* Returned commands can be null
*
*/
public Builder setCmds(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder builderForValue) {
if (cmdsBuilder_ == null) {
ensureCmdsIsMutable();
cmds_.set(index, builderForValue.build());
onChanged();
} else {
cmdsBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
*
*
* Returned commands can be null
*
*/
public Builder addCmds(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) {
if (cmdsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureCmdsIsMutable();
cmds_.add(value);
onChanged();
} else {
cmdsBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
*
*
* Returned commands can be null
*
*/
public Builder addCmds(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) {
if (cmdsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureCmdsIsMutable();
cmds_.add(index, value);
onChanged();
} else {
cmdsBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
*
*
* Returned commands can be null
*
*/
public Builder addCmds(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder builderForValue) {
if (cmdsBuilder_ == null) {
ensureCmdsIsMutable();
cmds_.add(builderForValue.build());
onChanged();
} else {
cmdsBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
*
*
* Returned commands can be null
*
*/
public Builder addCmds(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder builderForValue) {
if (cmdsBuilder_ == null) {
ensureCmdsIsMutable();
cmds_.add(index, builderForValue.build());
onChanged();
} else {
cmdsBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
*
*
* Returned commands can be null
*
*/
public Builder addAllCmds(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto> values) {
if (cmdsBuilder_ == null) {
ensureCmdsIsMutable();
super.addAll(values, cmds_);
onChanged();
} else {
cmdsBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
*
*
* Returned commands can be null
*
*/
public Builder clearCmds() {
if (cmdsBuilder_ == null) {
cmds_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
} else {
cmdsBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
*
*
* Returned commands can be null
*
*/
public Builder removeCmds(int index) {
if (cmdsBuilder_ == null) {
ensureCmdsIsMutable();
cmds_.remove(index);
onChanged();
} else {
cmdsBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
*
*
* Returned commands can be null
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder getCmdsBuilder(
int index) {
return getCmdsFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
*
*
* Returned commands can be null
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdsOrBuilder(
int index) {
if (cmdsBuilder_ == null) {
return cmds_.get(index); } else {
return cmdsBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
*
*
* Returned commands can be null
*
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder>
getCmdsOrBuilderList() {
if (cmdsBuilder_ != null) {
return cmdsBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(cmds_);
}
}
/**
* repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
*
*
* Returned commands can be null
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder addCmdsBuilder() {
return getCmdsFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
*
*
* Returned commands can be null
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder addCmdsBuilder(
int index) {
return getCmdsFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;
*
*
* Returned commands can be null
*
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder>
getCmdsBuilderList() {
return getCmdsFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder>
getCmdsFieldBuilder() {
if (cmdsBuilder_ == null) {
cmdsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder>(
cmds_,
((bitField0_ & 0x00000001) == 0x00000001),
getParentForChildren(),
isClean());
cmds_ = null;
}
return cmdsBuilder_;
}
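// Note: the repeated cmds field has two representations inside the Builder. Until
// getCmdsFieldBuilder() is first called, commands live in the plain cmds_ list; after
// that, ownership moves to the RepeatedFieldBuilder (cmds_ is set to null) and every
// accessor above routes through cmdsBuilder_. A sketch of the nested-builder path
// (hypothetical usage, not generated code):
//
//   responseBuilder.addCmdsBuilder();      // switches to the RepeatedFieldBuilder path
//   responseBuilder.getCmdsBuilder(0);     // edit the first command in place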
// required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;
private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto haStatus_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProtoOrBuilder> haStatusBuilder_;
/**
* required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;
*/
public boolean hasHaStatus() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto getHaStatus() {
if (haStatusBuilder_ == null) {
return haStatus_;
} else {
return haStatusBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;
*/
public Builder setHaStatus(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto value) {
if (haStatusBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
haStatus_ = value;
onChanged();
} else {
haStatusBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;
*/
public Builder setHaStatus(
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.Builder builderForValue) {
if (haStatusBuilder_ == null) {
haStatus_ = builderForValue.build();
onChanged();
} else {
haStatusBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;
*/
public Builder mergeHaStatus(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto value) {
if (haStatusBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002) &&
haStatus_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.getDefaultInstance()) {
haStatus_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.newBuilder(haStatus_).mergeFrom(value).buildPartial();
} else {
haStatus_ = value;
}
onChanged();
} else {
haStatusBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;
*/
public Builder clearHaStatus() {
if (haStatusBuilder_ == null) {
haStatus_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.getDefaultInstance();
onChanged();
} else {
haStatusBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
/**
* required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.Builder getHaStatusBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getHaStatusFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProtoOrBuilder getHaStatusOrBuilder() {
if (haStatusBuilder_ != null) {
return haStatusBuilder_.getMessageOrBuilder();
} else {
return haStatus_;
}
}
/**
* required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProtoOrBuilder>
getHaStatusFieldBuilder() {
if (haStatusBuilder_ == null) {
haStatusBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProtoOrBuilder>(
haStatus_,
getParentForChildren(),
isClean());
haStatus_ = null;
}
return haStatusBuilder_;
}
// optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto rollingUpgradeStatus_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder> rollingUpgradeStatusBuilder_;
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;
*/
public boolean hasRollingUpgradeStatus() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto getRollingUpgradeStatus() {
if (rollingUpgradeStatusBuilder_ == null) {
return rollingUpgradeStatus_;
} else {
return rollingUpgradeStatusBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;
*/
public Builder setRollingUpgradeStatus(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto value) {
if (rollingUpgradeStatusBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
rollingUpgradeStatus_ = value;
onChanged();
} else {
rollingUpgradeStatusBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;
*/
public Builder setRollingUpgradeStatus(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder builderForValue) {
if (rollingUpgradeStatusBuilder_ == null) {
rollingUpgradeStatus_ = builderForValue.build();
onChanged();
} else {
rollingUpgradeStatusBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;
*/
public Builder mergeRollingUpgradeStatus(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto value) {
if (rollingUpgradeStatusBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004) &&
rollingUpgradeStatus_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance()) {
rollingUpgradeStatus_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.newBuilder(rollingUpgradeStatus_).mergeFrom(value).buildPartial();
} else {
rollingUpgradeStatus_ = value;
}
onChanged();
} else {
rollingUpgradeStatusBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;
*/
public Builder clearRollingUpgradeStatus() {
if (rollingUpgradeStatusBuilder_ == null) {
rollingUpgradeStatus_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance();
onChanged();
} else {
rollingUpgradeStatusBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder getRollingUpgradeStatusBuilder() {
bitField0_ |= 0x00000004;
onChanged();
return getRollingUpgradeStatusFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder getRollingUpgradeStatusOrBuilder() {
if (rollingUpgradeStatusBuilder_ != null) {
return rollingUpgradeStatusBuilder_.getMessageOrBuilder();
} else {
return rollingUpgradeStatus_;
}
}
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder>
getRollingUpgradeStatusFieldBuilder() {
if (rollingUpgradeStatusBuilder_ == null) {
rollingUpgradeStatusBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder>(
rollingUpgradeStatus_,
getParentForChildren(),
isClean());
rollingUpgradeStatus_ = null;
}
return rollingUpgradeStatusBuilder_;
}
// optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto rollingUpgradeStatusV2_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder> rollingUpgradeStatusV2Builder_;
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;
*/
public boolean hasRollingUpgradeStatusV2() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto getRollingUpgradeStatusV2() {
if (rollingUpgradeStatusV2Builder_ == null) {
return rollingUpgradeStatusV2_;
} else {
return rollingUpgradeStatusV2Builder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;
*/
public Builder setRollingUpgradeStatusV2(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto value) {
if (rollingUpgradeStatusV2Builder_ == null) {
if (value == null) {
throw new NullPointerException();
}
rollingUpgradeStatusV2_ = value;
onChanged();
} else {
rollingUpgradeStatusV2Builder_.setMessage(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;
*/
public Builder setRollingUpgradeStatusV2(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder builderForValue) {
if (rollingUpgradeStatusV2Builder_ == null) {
rollingUpgradeStatusV2_ = builderForValue.build();
onChanged();
} else {
rollingUpgradeStatusV2Builder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;
*/
public Builder mergeRollingUpgradeStatusV2(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto value) {
if (rollingUpgradeStatusV2Builder_ == null) {
if (((bitField0_ & 0x00000008) == 0x00000008) &&
rollingUpgradeStatusV2_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance()) {
rollingUpgradeStatusV2_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.newBuilder(rollingUpgradeStatusV2_).mergeFrom(value).buildPartial();
} else {
rollingUpgradeStatusV2_ = value;
}
onChanged();
} else {
rollingUpgradeStatusV2Builder_.mergeFrom(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;
*/
public Builder clearRollingUpgradeStatusV2() {
if (rollingUpgradeStatusV2Builder_ == null) {
rollingUpgradeStatusV2_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance();
onChanged();
} else {
rollingUpgradeStatusV2Builder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder getRollingUpgradeStatusV2Builder() {
bitField0_ |= 0x00000008;
onChanged();
return getRollingUpgradeStatusV2FieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder getRollingUpgradeStatusV2OrBuilder() {
if (rollingUpgradeStatusV2Builder_ != null) {
return rollingUpgradeStatusV2Builder_.getMessageOrBuilder();
} else {
return rollingUpgradeStatusV2_;
}
}
/**
* optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder>
getRollingUpgradeStatusV2FieldBuilder() {
if (rollingUpgradeStatusV2Builder_ == null) {
rollingUpgradeStatusV2Builder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder>(
rollingUpgradeStatusV2_,
getParentForChildren(),
isClean());
rollingUpgradeStatusV2_ = null;
}
return rollingUpgradeStatusV2Builder_;
}
// optional uint64 fullBlockReportLeaseId = 5 [default = 0];
private long fullBlockReportLeaseId_ ;
/**
* optional uint64 fullBlockReportLeaseId = 5 [default = 0];
*/
public boolean hasFullBlockReportLeaseId() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional uint64 fullBlockReportLeaseId = 5 [default = 0];
*/
public long getFullBlockReportLeaseId() {
return fullBlockReportLeaseId_;
}
/**
* optional uint64 fullBlockReportLeaseId = 5 [default = 0];
*/
public Builder setFullBlockReportLeaseId(long value) {
bitField0_ |= 0x00000010;
fullBlockReportLeaseId_ = value;
onChanged();
return this;
}
/**
* optional uint64 fullBlockReportLeaseId = 5 [default = 0];
*/
public Builder clearFullBlockReportLeaseId() {
bitField0_ = (bitField0_ & ~0x00000010);
fullBlockReportLeaseId_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.HeartbeatResponseProto)
}
static {
defaultInstance = new HeartbeatResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.HeartbeatResponseProto)
}
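// Usage sketch (illustrative, not part of the generated file): a receiver of a
// HeartbeatResponseProto typically walks the returned commands and, when present,
// prefers the V2 rolling-upgrade status over the legacy field. Variable names such as
// bytesFromRpc are hypothetical.
//
//   HeartbeatResponseProto resp = HeartbeatResponseProto.parseFrom(bytesFromRpc);
//   for (DatanodeCommandProto cmd : resp.getCmdsList()) {
//     // dispatch on the command type carried by cmd
//   }
//   HdfsProtos.RollingUpgradeStatusProto rollingUpgrade =
//       resp.hasRollingUpgradeStatusV2() ? resp.getRollingUpgradeStatusV2()
//                                        : resp.getRollingUpgradeStatus();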
public interface BlockReportRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
boolean hasRegistration();
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration();
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder();
// required string blockPoolId = 2;
/**
* required string blockPoolId = 2;
*/
boolean hasBlockPoolId();
/**
* required string blockPoolId = 2;
*/
java.lang.String getBlockPoolId();
/**
* required string blockPoolId = 2;
*/
com.google.protobuf.ByteString
getBlockPoolIdBytes();
// repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
/**
* repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto>
getReportsList();
/**
* repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto getReports(int index);
/**
* repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
*/
int getReportsCount();
/**
* repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder>
getReportsOrBuilderList();
/**
* repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder getReportsOrBuilder(
int index);
// optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;
/**
* optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;
*/
boolean hasContext();
/**
* optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto getContext();
/**
* optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProtoOrBuilder getContextOrBuilder();
}
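// Builder sketch (illustrative; assumes the generated Builder exposes the usual
// setX/addX counterparts of the accessors declared above; all identifiers and values
// here are hypothetical):
//
//   BlockReportRequestProto request = BlockReportRequestProto.newBuilder()
//       .setRegistration(registration)          // DatanodeRegistrationProto of this DN
//       .setBlockPoolId("BP-1234-127.0.0.1-0")  // made-up block pool id
//       .addReports(storageReport)              // one StorageBlockReportProto per storage
//       .build();                               // throws if a required field is missing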
/**
* Protobuf type {@code hadoop.hdfs.datanode.BlockReportRequestProto}
*
*
**
* registration - datanode registration information
* blockPoolID - block pool ID of the reported blocks
* blocks - each block is represented as multiple longs in the array.
* first long represents block ID
* second long represents length
* third long represents gen stamp
* fourth long (if under construction) represents replica state
* context - An optional field containing information about the context
* of this block report.
*
*/
public static final class BlockReportRequestProto extends
com.google.protobuf.GeneratedMessage
implements BlockReportRequestProtoOrBuilder {
// Use BlockReportRequestProto.newBuilder() to construct.
private BlockReportRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private BlockReportRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final BlockReportRequestProto defaultInstance;
public static BlockReportRequestProto getDefaultInstance() {
return defaultInstance;
}
public BlockReportRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private BlockReportRequestProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = registration_.toBuilder();
}
registration_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(registration_);
registration_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
bitField0_ |= 0x00000002;
blockPoolId_ = input.readBytes();
break;
}
case 26: {
if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
reports_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto>();
mutable_bitField0_ |= 0x00000004;
}
reports_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.PARSER, extensionRegistry));
break;
}
case 34: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) == 0x00000004)) {
subBuilder = context_.toBuilder();
}
context_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(context_);
context_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
reports_ = java.util.Collections.unmodifiableList(reports_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
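// Note on the case labels above: each value is a protobuf wire tag,
// tag = (fieldNumber << 3) | wireType. All four fields here are length-delimited
// (wire type 2), so registration (field 1) -> 10, blockPoolId (field 2) -> 18,
// reports (field 3) -> 26 and context (field 4) -> 34; tag 0 marks end of input.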
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.Builder.class);
}
public static com.google.protobuf.Parser<BlockReportRequestProto> PARSER =
new com.google.protobuf.AbstractParser<BlockReportRequestProto>() {
public BlockReportRequestProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new BlockReportRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<BlockReportRequestProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
public static final int REGISTRATION_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_;
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public boolean hasRegistration() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
return registration_;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
return registration_;
}
// required string blockPoolId = 2;
public static final int BLOCKPOOLID_FIELD_NUMBER = 2;
private java.lang.Object blockPoolId_;
/**
* required string blockPoolId = 2;
*/
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string blockPoolId = 2;
*/
public java.lang.String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
blockPoolId_ = s;
}
return s;
}
}
/**
* required string blockPoolId = 2;
*/
public com.google.protobuf.ByteString
getBlockPoolIdBytes() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPoolId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
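// Note: blockPoolId_ holds either a String or a ByteString. getBlockPoolId() decodes
// UTF-8 and caches the String only when the bytes are valid UTF-8, while
// getBlockPoolIdBytes() performs the reverse conversion and caches the ByteString,
// so repeated calls in either direction avoid re-encoding.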
// repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
public static final int REPORTS_FIELD_NUMBER = 3;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto> reports_;
/**
* repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto> getReportsList() {
return reports_;
}
/**
* repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder>
getReportsOrBuilderList() {
return reports_;
}
/**
* repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
*/
public int getReportsCount() {
return reports_.size();
}
/**
* repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto getReports(int index) {
return reports_.get(index);
}
/**
* repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder getReportsOrBuilder(
int index) {
return reports_.get(index);
}
// optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;
public static final int CONTEXT_FIELD_NUMBER = 4;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto context_;
/**
* optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;
*/
public boolean hasContext() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto getContext() {
return context_;
}
/**
* optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProtoOrBuilder getContextOrBuilder() {
return context_;
}
private void initFields() {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
blockPoolId_ = "";
reports_ = java.util.Collections.emptyList();
context_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasRegistration()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasBlockPoolId()) {
memoizedIsInitialized = 0;
return false;
}
if (!getRegistration().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getReportsCount(); i++) {
if (!getReports(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasContext()) {
if (!getContext().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, registration_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getBlockPoolIdBytes());
}
for (int i = 0; i < reports_.size(); i++) {
output.writeMessage(3, reports_.get(i));
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeMessage(4, context_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, registration_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getBlockPoolIdBytes());
}
for (int i = 0; i < reports_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, reports_.get(i));
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(4, context_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto) obj;
boolean result = true;
result = result && (hasRegistration() == other.hasRegistration());
if (hasRegistration()) {
result = result && getRegistration()
.equals(other.getRegistration());
}
result = result && (hasBlockPoolId() == other.hasBlockPoolId());
if (hasBlockPoolId()) {
result = result && getBlockPoolId()
.equals(other.getBlockPoolId());
}
result = result && getReportsList()
.equals(other.getReportsList());
result = result && (hasContext() == other.hasContext());
if (hasContext()) {
result = result && getContext()
.equals(other.getContext());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasRegistration()) {
hash = (37 * hash) + REGISTRATION_FIELD_NUMBER;
hash = (53 * hash) + getRegistration().hashCode();
}
if (hasBlockPoolId()) {
hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
hash = (53 * hash) + getBlockPoolId().hashCode();
}
if (getReportsCount() > 0) {
hash = (37 * hash) + REPORTS_FIELD_NUMBER;
hash = (53 * hash) + getReportsList().hashCode();
}
if (hasContext()) {
hash = (37 * hash) + CONTEXT_FIELD_NUMBER;
hash = (53 * hash) + getContext().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.BlockReportRequestProto}
*
*
**
* registration - datanode registration information
* blockPoolID - block pool ID of the reported blocks
* blocks - each block is represented as multiple longs in the array.
* first long represents block ID
* second long represents length
* third long represents gen stamp
* fourth long (if under construction) represents replica state
* context - An optional field containing information about the context
* of this block report.
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getRegistrationFieldBuilder();
getReportsFieldBuilder();
getContextFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (registrationBuilder_ == null) {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
} else {
registrationBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
blockPoolId_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
if (reportsBuilder_ == null) {
reports_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
} else {
reportsBuilder_.clear();
}
if (contextBuilder_ == null) {
context_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.getDefaultInstance();
} else {
contextBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportRequestProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (registrationBuilder_ == null) {
result.registration_ = registration_;
} else {
result.registration_ = registrationBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.blockPoolId_ = blockPoolId_;
if (reportsBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004)) {
reports_ = java.util.Collections.unmodifiableList(reports_);
bitField0_ = (bitField0_ & ~0x00000004);
}
result.reports_ = reports_;
} else {
result.reports_ = reportsBuilder_.build();
}
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000004;
}
if (contextBuilder_ == null) {
result.context_ = context_;
} else {
result.context_ = contextBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.getDefaultInstance()) return this;
if (other.hasRegistration()) {
mergeRegistration(other.getRegistration());
}
if (other.hasBlockPoolId()) {
bitField0_ |= 0x00000002;
blockPoolId_ = other.blockPoolId_;
onChanged();
}
if (reportsBuilder_ == null) {
if (!other.reports_.isEmpty()) {
if (reports_.isEmpty()) {
reports_ = other.reports_;
bitField0_ = (bitField0_ & ~0x00000004);
} else {
ensureReportsIsMutable();
reports_.addAll(other.reports_);
}
onChanged();
}
} else {
if (!other.reports_.isEmpty()) {
if (reportsBuilder_.isEmpty()) {
reportsBuilder_.dispose();
reportsBuilder_ = null;
reports_ = other.reports_;
bitField0_ = (bitField0_ & ~0x00000004);
reportsBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getReportsFieldBuilder() : null;
} else {
reportsBuilder_.addAllMessages(other.reports_);
}
}
}
if (other.hasContext()) {
mergeContext(other.getContext());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasRegistration()) {
return false;
}
if (!hasBlockPoolId()) {
return false;
}
if (!getRegistration().isInitialized()) {
return false;
}
for (int i = 0; i < getReportsCount(); i++) {
if (!getReports(i).isInitialized()) {
return false;
}
}
if (hasContext()) {
if (!getContext().isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registrationBuilder_;
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public boolean hasRegistration() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
if (registrationBuilder_ == null) {
return registration_;
} else {
return registrationBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
if (registrationBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
registration_ = value;
onChanged();
} else {
registrationBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public Builder setRegistration(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) {
if (registrationBuilder_ == null) {
registration_ = builderForValue.build();
onChanged();
} else {
registrationBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
if (registrationBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
registration_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) {
registration_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial();
} else {
registration_ = value;
}
onChanged();
} else {
registrationBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public Builder clearRegistration() {
if (registrationBuilder_ == null) {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
onChanged();
} else {
registrationBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistrationBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getRegistrationFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
if (registrationBuilder_ != null) {
return registrationBuilder_.getMessageOrBuilder();
} else {
return registration_;
}
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>
getRegistrationFieldBuilder() {
if (registrationBuilder_ == null) {
registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>(
registration_,
getParentForChildren(),
isClean());
registration_ = null;
}
return registrationBuilder_;
}
// required string blockPoolId = 2;
private java.lang.Object blockPoolId_ = "";
/**
* required string blockPoolId = 2;
*/
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string blockPoolId = 2;
*/
public java.lang.String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
blockPoolId_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string blockPoolId = 2;
*/
public com.google.protobuf.ByteString
getBlockPoolIdBytes() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPoolId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string blockPoolId = 2;
*/
public Builder setBlockPoolId(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
blockPoolId_ = value;
onChanged();
return this;
}
/**
* required string blockPoolId = 2;
*/
public Builder clearBlockPoolId() {
bitField0_ = (bitField0_ & ~0x00000002);
blockPoolId_ = getDefaultInstance().getBlockPoolId();
onChanged();
return this;
}
/**
* required string blockPoolId = 2;
*/
public Builder setBlockPoolIdBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
blockPoolId_ = value;
onChanged();
return this;
}
// repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto> reports_ =
java.util.Collections.emptyList();
private void ensureReportsIsMutable() {
if (!((bitField0_ & 0x00000004) == 0x00000004)) {
reports_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto>(reports_);
bitField0_ |= 0x00000004;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder> reportsBuilder_;
/**
* repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto> getReportsList() {
if (reportsBuilder_ == null) {
return java.util.Collections.unmodifiableList(reports_);
} else {
return reportsBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
*/
public int getReportsCount() {
if (reportsBuilder_ == null) {
return reports_.size();
} else {
return reportsBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto getReports(int index) {
if (reportsBuilder_ == null) {
return reports_.get(index);
} else {
return reportsBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
*/
public Builder setReports(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto value) {
if (reportsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureReportsIsMutable();
reports_.set(index, value);
onChanged();
} else {
reportsBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
*/
public Builder setReports(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder builderForValue) {
if (reportsBuilder_ == null) {
ensureReportsIsMutable();
reports_.set(index, builderForValue.build());
onChanged();
} else {
reportsBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
*/
public Builder addReports(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto value) {
if (reportsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureReportsIsMutable();
reports_.add(value);
onChanged();
} else {
reportsBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
*/
public Builder addReports(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto value) {
if (reportsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureReportsIsMutable();
reports_.add(index, value);
onChanged();
} else {
reportsBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
*/
public Builder addReports(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder builderForValue) {
if (reportsBuilder_ == null) {
ensureReportsIsMutable();
reports_.add(builderForValue.build());
onChanged();
} else {
reportsBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
*/
public Builder addReports(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder builderForValue) {
if (reportsBuilder_ == null) {
ensureReportsIsMutable();
reports_.add(index, builderForValue.build());
onChanged();
} else {
reportsBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
*/
public Builder addAllReports(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto> values) {
if (reportsBuilder_ == null) {
ensureReportsIsMutable();
super.addAll(values, reports_);
onChanged();
} else {
reportsBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
*/
public Builder clearReports() {
if (reportsBuilder_ == null) {
reports_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
} else {
reportsBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
*/
public Builder removeReports(int index) {
if (reportsBuilder_ == null) {
ensureReportsIsMutable();
reports_.remove(index);
onChanged();
} else {
reportsBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder getReportsBuilder(
int index) {
return getReportsFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder getReportsOrBuilder(
int index) {
if (reportsBuilder_ == null) {
return reports_.get(index); } else {
return reportsBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder>
getReportsOrBuilderList() {
if (reportsBuilder_ != null) {
return reportsBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(reports_);
}
}
/**
* repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder addReportsBuilder() {
return getReportsFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder addReportsBuilder(
int index) {
return getReportsFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder>
getReportsBuilderList() {
return getReportsFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder>
getReportsFieldBuilder() {
if (reportsBuilder_ == null) {
reportsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder>(
reports_,
((bitField0_ & 0x00000004) == 0x00000004),
getParentForChildren(),
isClean());
reports_ = null;
}
return reportsBuilder_;
}
// optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto context_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProtoOrBuilder> contextBuilder_;
/**
* optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;
*/
public boolean hasContext() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto getContext() {
if (contextBuilder_ == null) {
return context_;
} else {
return contextBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;
*/
public Builder setContext(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto value) {
if (contextBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
context_ = value;
onChanged();
} else {
contextBuilder_.setMessage(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;
*/
public Builder setContext(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.Builder builderForValue) {
if (contextBuilder_ == null) {
context_ = builderForValue.build();
onChanged();
} else {
contextBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;
*/
public Builder mergeContext(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto value) {
if (contextBuilder_ == null) {
if (((bitField0_ & 0x00000008) == 0x00000008) &&
context_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.getDefaultInstance()) {
context_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.newBuilder(context_).mergeFrom(value).buildPartial();
} else {
context_ = value;
}
onChanged();
} else {
contextBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;
*/
public Builder clearContext() {
if (contextBuilder_ == null) {
context_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.getDefaultInstance();
onChanged();
} else {
contextBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
/**
* optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.Builder getContextBuilder() {
bitField0_ |= 0x00000008;
onChanged();
return getContextFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProtoOrBuilder getContextOrBuilder() {
if (contextBuilder_ != null) {
return contextBuilder_.getMessageOrBuilder();
} else {
return context_;
}
}
/**
* optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProtoOrBuilder>
getContextFieldBuilder() {
if (contextBuilder_ == null) {
contextBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProtoOrBuilder>(
context_,
getParentForChildren(),
isClean());
context_ = null;
}
return contextBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.BlockReportRequestProto)
}
static {
defaultInstance = new BlockReportRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.BlockReportRequestProto)
}
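// Illustrative sketch, not part of the generated file: one way a caller might
// assemble a BlockReportRequestProto with the builder API defined above. The
// variables reg, storageReport and reportId are hypothetical placeholders.
//
//   BlockReportRequestProto request = BlockReportRequestProto.newBuilder()
//       .setRegistration(reg)                 // this datanode's DatanodeRegistrationProto
//       .setBlockPoolId("BP-1-127.0.0.1-1")   // hypothetical block pool ID
//       .addReports(storageReport)            // one StorageBlockReportProto per storage
//       .setContext(BlockReportContextProto.newBuilder()
//           .setTotalRpcs(1)                  // whole report sent in a single RPC
//           .setCurRpc(0)
//           .setId(reportId)
//           .setLeaseId(0L))                  // 0 = send without a block report lease
//       .build();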
public interface BlockReportContextProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required int32 totalRpcs = 1;
/**
* required int32 totalRpcs = 1;
*
*
* The total number of RPCs this block report is broken into.
*
*/
boolean hasTotalRpcs();
/**
* required int32 totalRpcs = 1;
*
*
* The total number of RPCs this block report is broken into.
*
*/
int getTotalRpcs();
// required int32 curRpc = 2;
/**
* required int32 curRpc = 2;
*
*
* The index of the current RPC (zero-based)
*
*/
boolean hasCurRpc();
/**
* required int32 curRpc = 2;
*
*
* The index of the current RPC (zero-based)
*
*/
int getCurRpc();
// required int64 id = 3;
/**
* required int64 id = 3;
*
*
* The unique 64-bit ID of this block report
*
*/
boolean hasId();
/**
* required int64 id = 3;
*
*
* The unique 64-bit ID of this block report
*
*/
long getId();
// optional uint64 leaseId = 4 [default = 0];
/**
* optional uint64 leaseId = 4 [default = 0];
*
*
* The block report lease ID, or 0 if we are sending without a lease to
* bypass rate-limiting.
*
*/
boolean hasLeaseId();
/**
* optional uint64 leaseId = 4 [default = 0];
*
*
* The block report lease ID, or 0 if we are sending without a lease to
* bypass rate-limiting.
*
*/
long getLeaseId();
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.BlockReportContextProto}
*/
public static final class BlockReportContextProto extends
com.google.protobuf.GeneratedMessage
implements BlockReportContextProtoOrBuilder {
// Use BlockReportContextProto.newBuilder() to construct.
private BlockReportContextProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private BlockReportContextProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final BlockReportContextProto defaultInstance;
public static BlockReportContextProto getDefaultInstance() {
return defaultInstance;
}
public BlockReportContextProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private BlockReportContextProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
totalRpcs_ = input.readInt32();
break;
}
case 16: {
bitField0_ |= 0x00000002;
curRpc_ = input.readInt32();
break;
}
case 24: {
bitField0_ |= 0x00000004;
id_ = input.readInt64();
break;
}
case 32: {
bitField0_ |= 0x00000008;
leaseId_ = input.readUInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportContextProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportContextProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.Builder.class);
}
public static com.google.protobuf.Parser<BlockReportContextProto> PARSER =
new com.google.protobuf.AbstractParser<BlockReportContextProto>() {
public BlockReportContextProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new BlockReportContextProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<BlockReportContextProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required int32 totalRpcs = 1;
public static final int TOTALRPCS_FIELD_NUMBER = 1;
private int totalRpcs_;
/**
* required int32 totalRpcs = 1;
*
*
* The total number of RPCs this block report is broken into.
*
*/
public boolean hasTotalRpcs() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required int32 totalRpcs = 1;
*
*
* The total number of RPCs this block report is broken into.
*
*/
public int getTotalRpcs() {
return totalRpcs_;
}
// required int32 curRpc = 2;
public static final int CURRPC_FIELD_NUMBER = 2;
private int curRpc_;
/**
* required int32 curRpc = 2;
*
*
* The index of the current RPC (zero-based)
*
*/
public boolean hasCurRpc() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required int32 curRpc = 2;
*
*
* The index of the current RPC (zero-based)
*
*/
public int getCurRpc() {
return curRpc_;
}
// required int64 id = 3;
public static final int ID_FIELD_NUMBER = 3;
private long id_;
/**
* required int64 id = 3;
*
*
* The unique 64-bit ID of this block report
*
*/
public boolean hasId() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required int64 id = 3;
*
*
* The unique 64-bit ID of this block report
*
*/
public long getId() {
return id_;
}
// optional uint64 leaseId = 4 [default = 0];
public static final int LEASEID_FIELD_NUMBER = 4;
private long leaseId_;
/**
* optional uint64 leaseId = 4 [default = 0];
*
*
* The block report lease ID, or 0 if we are sending without a lease to
* bypass rate-limiting.
*
*/
public boolean hasLeaseId() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional uint64 leaseId = 4 [default = 0];
*
*
* The block report lease ID, or 0 if we are sending without a lease to
* bypass rate-limiting.
*
*/
public long getLeaseId() {
return leaseId_;
}
private void initFields() {
totalRpcs_ = 0;
curRpc_ = 0;
id_ = 0L;
leaseId_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasTotalRpcs()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasCurRpc()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasId()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeInt32(1, totalRpcs_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeInt32(2, curRpc_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeInt64(3, id_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt64(4, leaseId_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(1, totalRpcs_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(2, curRpc_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(3, id_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(4, leaseId_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto) obj;
boolean result = true;
result = result && (hasTotalRpcs() == other.hasTotalRpcs());
if (hasTotalRpcs()) {
result = result && (getTotalRpcs()
== other.getTotalRpcs());
}
result = result && (hasCurRpc() == other.hasCurRpc());
if (hasCurRpc()) {
result = result && (getCurRpc()
== other.getCurRpc());
}
result = result && (hasId() == other.hasId());
if (hasId()) {
result = result && (getId()
== other.getId());
}
result = result && (hasLeaseId() == other.hasLeaseId());
if (hasLeaseId()) {
result = result && (getLeaseId()
== other.getLeaseId());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasTotalRpcs()) {
hash = (37 * hash) + TOTALRPCS_FIELD_NUMBER;
hash = (53 * hash) + getTotalRpcs();
}
if (hasCurRpc()) {
hash = (37 * hash) + CURRPC_FIELD_NUMBER;
hash = (53 * hash) + getCurRpc();
}
if (hasId()) {
hash = (37 * hash) + ID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getId());
}
if (hasLeaseId()) {
hash = (37 * hash) + LEASEID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getLeaseId());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.BlockReportContextProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportContextProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportContextProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
totalRpcs_ = 0;
bitField0_ = (bitField0_ & ~0x00000001);
curRpc_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
id_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
leaseId_ = 0L;
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportContextProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.totalRpcs_ = totalRpcs_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.curRpc_ = curRpc_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.id_ = id_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.leaseId_ = leaseId_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.getDefaultInstance()) return this;
if (other.hasTotalRpcs()) {
setTotalRpcs(other.getTotalRpcs());
}
if (other.hasCurRpc()) {
setCurRpc(other.getCurRpc());
}
if (other.hasId()) {
setId(other.getId());
}
if (other.hasLeaseId()) {
setLeaseId(other.getLeaseId());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasTotalRpcs()) {
return false;
}
if (!hasCurRpc()) {
return false;
}
if (!hasId()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required int32 totalRpcs = 1;
private int totalRpcs_ ;
/**
* required int32 totalRpcs = 1;
*
*
* The total number of RPCs this block report is broken into.
*
*/
public boolean hasTotalRpcs() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required int32 totalRpcs = 1;
*
*
* The total number of RPCs this block report is broken into.
*
*/
public int getTotalRpcs() {
return totalRpcs_;
}
/**
* required int32 totalRpcs = 1;
*
*
* The total number of RPCs this block report is broken into.
*
*/
public Builder setTotalRpcs(int value) {
bitField0_ |= 0x00000001;
totalRpcs_ = value;
onChanged();
return this;
}
/**
* required int32 totalRpcs = 1;
*
*
* The total number of RPCs this block report is broken into.
*
*/
public Builder clearTotalRpcs() {
bitField0_ = (bitField0_ & ~0x00000001);
totalRpcs_ = 0;
onChanged();
return this;
}
// required int32 curRpc = 2;
private int curRpc_ ;
/**
* required int32 curRpc = 2;
*
*
* The index of the current RPC (zero-based)
*
*/
public boolean hasCurRpc() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required int32 curRpc = 2;
*
*
* The index of the current RPC (zero-based)
*
*/
public int getCurRpc() {
return curRpc_;
}
/**
* required int32 curRpc = 2;
*
*
* The index of the current RPC (zero-based)
*
*/
public Builder setCurRpc(int value) {
bitField0_ |= 0x00000002;
curRpc_ = value;
onChanged();
return this;
}
/**
* required int32 curRpc = 2;
*
*
* The index of the current RPC (zero-based)
*
*/
public Builder clearCurRpc() {
bitField0_ = (bitField0_ & ~0x00000002);
curRpc_ = 0;
onChanged();
return this;
}
// required int64 id = 3;
private long id_ ;
/**
* required int64 id = 3;
*
*
* The unique 64-bit ID of this block report
*
*/
public boolean hasId() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required int64 id = 3;
*
*
* The unique 64-bit ID of this block report
*
*/
public long getId() {
return id_;
}
/**
* required int64 id = 3;
*
*
* The unique 64-bit ID of this block report
*
*/
public Builder setId(long value) {
bitField0_ |= 0x00000004;
id_ = value;
onChanged();
return this;
}
/**
* required int64 id = 3;
*
*
* The unique 64-bit ID of this block report
*
*/
public Builder clearId() {
bitField0_ = (bitField0_ & ~0x00000004);
id_ = 0L;
onChanged();
return this;
}
// optional uint64 leaseId = 4 [default = 0];
private long leaseId_ ;
/**
* optional uint64 leaseId = 4 [default = 0];
*
*
* The block report lease ID, or 0 if we are sending without a lease to
* bypass rate-limiting.
*
*/
public boolean hasLeaseId() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional uint64 leaseId = 4 [default = 0];
*
*
* The block report lease ID, or 0 if we are sending without a lease to
* bypass rate-limiting.
*
*/
public long getLeaseId() {
return leaseId_;
}
/**
* optional uint64 leaseId = 4 [default = 0];
*
*
* The block report lease ID, or 0 if we are sending without a lease to
* bypass rate-limiting.
*
*/
public Builder setLeaseId(long value) {
bitField0_ |= 0x00000008;
leaseId_ = value;
onChanged();
return this;
}
/**
* optional uint64 leaseId = 4 [default = 0];
*
*
* The block report lease ID, or 0 if we are sending without a lease to
* bypass rate-limiting.
*
*/
public Builder clearLeaseId() {
bitField0_ = (bitField0_ & ~0x00000008);
leaseId_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.BlockReportContextProto)
}
static {
defaultInstance = new BlockReportContextProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.BlockReportContextProto)
}
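// Illustrative sketch, not part of the generated file: a BlockReportContextProto
// for a block report split across several RPCs, matching the totalRpcs/curRpc
// field comments above. rpcIndex, reportId and leaseId are hypothetical values;
// leaseId may be 0 to send without a lease and bypass rate-limiting.
//
//   BlockReportContextProto context = BlockReportContextProto.newBuilder()
//       .setTotalRpcs(3)        // this report is broken into three RPCs
//       .setCurRpc(rpcIndex)    // zero-based index of the current RPC
//       .setId(reportId)        // 64-bit ID of this block report
//       .setLeaseId(leaseId)
//       .build();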
public interface StorageBlockReportProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.DatanodeStorageProto storage = 1;
/**
* required .hadoop.hdfs.DatanodeStorageProto storage = 1;
*
*
* Storage
*
*/
boolean hasStorage();
/**
* required .hadoop.hdfs.DatanodeStorageProto storage = 1;
*
*
* Storage
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage();
/**
* required .hadoop.hdfs.DatanodeStorageProto storage = 1;
*
*
* Storage
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder();
// repeated uint64 blocks = 2 [packed = true];
/**
* repeated uint64 blocks = 2 [packed = true];
*/
java.util.List<java.lang.Long> getBlocksList();
/**
* repeated uint64 blocks = 2 [packed = true];
*/
int getBlocksCount();
/**
* repeated uint64 blocks = 2 [packed = true];
*/
long getBlocks(int index);
// optional uint64 numberOfBlocks = 3;
/**
* optional uint64 numberOfBlocks = 3;
*/
boolean hasNumberOfBlocks();
/**
* optional uint64 numberOfBlocks = 3;
*/
long getNumberOfBlocks();
// repeated bytes blocksBuffers = 4;
/**
* repeated bytes blocksBuffers = 4;
*/
java.util.List<com.google.protobuf.ByteString> getBlocksBuffersList();
/**
* repeated bytes blocksBuffers = 4;
*/
int getBlocksBuffersCount();
/**
* repeated bytes blocksBuffers = 4;
*/
com.google.protobuf.ByteString getBlocksBuffers(int index);
}
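// Illustrative sketch, not part of the generated file: the repeated uint64
// "blocks" field declared above carries the flattened long encoding described in
// the BlockReportRequestProto comment (block ID, length, generation stamp, and,
// for blocks under construction, a fourth long for replica state). Assuming the
// usual generated builder methods setStorage/addBlocks, a finalized block could
// be appended roughly as follows; storage, blockId, len and genStamp are
// hypothetical variables.
//
//   StorageBlockReportProto report = StorageBlockReportProto.newBuilder()
//       .setStorage(storage)    // the DatanodeStorageProto being reported
//       .addBlocks(blockId)     // first long: block ID
//       .addBlocks(len)         // second long: block length
//       .addBlocks(genStamp)    // third long: generation stamp
//       .build();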
/**
* Protobuf type {@code hadoop.hdfs.datanode.StorageBlockReportProto}
*
*
**
* Report of blocks in a storage
*
*/
public static final class StorageBlockReportProto extends
com.google.protobuf.GeneratedMessage
implements StorageBlockReportProtoOrBuilder {
// Use StorageBlockReportProto.newBuilder() to construct.
private StorageBlockReportProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private StorageBlockReportProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final StorageBlockReportProto defaultInstance;
public static StorageBlockReportProto getDefaultInstance() {
return defaultInstance;
}
public StorageBlockReportProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private StorageBlockReportProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = storage_.toBuilder();
}
storage_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(storage_);
storage_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 16: {
if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
blocks_ = new java.util.ArrayList<java.lang.Long>();
mutable_bitField0_ |= 0x00000002;
}
blocks_.add(input.readUInt64());
break;
}
case 18: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
if (!((mutable_bitField0_ & 0x00000002) == 0x00000002) && input.getBytesUntilLimit() > 0) {
blocks_ = new java.util.ArrayList<java.lang.Long>();
mutable_bitField0_ |= 0x00000002;
}
while (input.getBytesUntilLimit() > 0) {
blocks_.add(input.readUInt64());
}
input.popLimit(limit);
break;
}
case 24: {
bitField0_ |= 0x00000002;
numberOfBlocks_ = input.readUInt64();
break;
}
case 34: {
if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
blocksBuffers_ = new java.util.ArrayList<com.google.protobuf.ByteString>();
mutable_bitField0_ |= 0x00000008;
}
blocksBuffers_.add(input.readBytes());
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
blocks_ = java.util.Collections.unmodifiableList(blocks_);
}
if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
blocksBuffers_ = java.util.Collections.unmodifiableList(blocksBuffers_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_StorageBlockReportProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_StorageBlockReportProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder.class);
}
public static com.google.protobuf.Parser<StorageBlockReportProto> PARSER =
new com.google.protobuf.AbstractParser<StorageBlockReportProto>() {
public StorageBlockReportProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new StorageBlockReportProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<StorageBlockReportProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.DatanodeStorageProto storage = 1;
public static final int STORAGE_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto storage_;
/**
* required .hadoop.hdfs.DatanodeStorageProto storage = 1;
*
*
* Storage
*
*/
public boolean hasStorage() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.DatanodeStorageProto storage = 1;
*
*
* Storage
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage() {
return storage_;
}
/**
* required .hadoop.hdfs.DatanodeStorageProto storage = 1;
*
*
* Storage
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder() {
return storage_;
}
// repeated uint64 blocks = 2 [packed = true];
public static final int BLOCKS_FIELD_NUMBER = 2;
private java.util.List<java.lang.Long> blocks_;
/**
* repeated uint64 blocks = 2 [packed = true];
*/
public java.util.List<java.lang.Long>
getBlocksList() {
return blocks_;
}
/**
* repeated uint64 blocks = 2 [packed = true];
*/
public int getBlocksCount() {
return blocks_.size();
}
/**
* repeated uint64 blocks = 2 [packed = true];
*/
public long getBlocks(int index) {
return blocks_.get(index);
}
private int blocksMemoizedSerializedSize = -1;
// optional uint64 numberOfBlocks = 3;
public static final int NUMBEROFBLOCKS_FIELD_NUMBER = 3;
private long numberOfBlocks_;
/**
* optional uint64 numberOfBlocks = 3;
*/
public boolean hasNumberOfBlocks() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional uint64 numberOfBlocks = 3;
*/
public long getNumberOfBlocks() {
return numberOfBlocks_;
}
// repeated bytes blocksBuffers = 4;
public static final int BLOCKSBUFFERS_FIELD_NUMBER = 4;
private java.util.List<com.google.protobuf.ByteString> blocksBuffers_;
/**
* repeated bytes blocksBuffers = 4;
*/
public java.util.List<com.google.protobuf.ByteString>
getBlocksBuffersList() {
return blocksBuffers_;
}
/**
* repeated bytes blocksBuffers = 4;
*/
public int getBlocksBuffersCount() {
return blocksBuffers_.size();
}
/**
* repeated bytes blocksBuffers = 4;
*/
public com.google.protobuf.ByteString getBlocksBuffers(int index) {
return blocksBuffers_.get(index);
}
private void initFields() {
storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance();
blocks_ = java.util.Collections.emptyList();
numberOfBlocks_ = 0L;
blocksBuffers_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasStorage()) {
memoizedIsInitialized = 0;
return false;
}
if (!getStorage().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, storage_);
}
if (getBlocksList().size() > 0) {
output.writeRawVarint32(18);
output.writeRawVarint32(blocksMemoizedSerializedSize);
}
for (int i = 0; i < blocks_.size(); i++) {
output.writeUInt64NoTag(blocks_.get(i));
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(3, numberOfBlocks_);
}
for (int i = 0; i < blocksBuffers_.size(); i++) {
output.writeBytes(4, blocksBuffers_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, storage_);
}
{
int dataSize = 0;
for (int i = 0; i < blocks_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeUInt64SizeNoTag(blocks_.get(i));
}
size += dataSize;
if (!getBlocksList().isEmpty()) {
size += 1;
size += com.google.protobuf.CodedOutputStream
.computeInt32SizeNoTag(dataSize);
}
blocksMemoizedSerializedSize = dataSize;
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, numberOfBlocks_);
}
{
int dataSize = 0;
for (int i = 0; i < blocksBuffers_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeBytesSizeNoTag(blocksBuffers_.get(i));
}
size += dataSize;
size += 1 * getBlocksBuffersList().size();
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto) obj;
boolean result = true;
result = result && (hasStorage() == other.hasStorage());
if (hasStorage()) {
result = result && getStorage()
.equals(other.getStorage());
}
result = result && getBlocksList()
.equals(other.getBlocksList());
result = result && (hasNumberOfBlocks() == other.hasNumberOfBlocks());
if (hasNumberOfBlocks()) {
result = result && (getNumberOfBlocks()
== other.getNumberOfBlocks());
}
result = result && getBlocksBuffersList()
.equals(other.getBlocksBuffersList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasStorage()) {
hash = (37 * hash) + STORAGE_FIELD_NUMBER;
hash = (53 * hash) + getStorage().hashCode();
}
if (getBlocksCount() > 0) {
hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
hash = (53 * hash) + getBlocksList().hashCode();
}
if (hasNumberOfBlocks()) {
hash = (37 * hash) + NUMBEROFBLOCKS_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getNumberOfBlocks());
}
if (getBlocksBuffersCount() > 0) {
hash = (37 * hash) + BLOCKSBUFFERS_FIELD_NUMBER;
hash = (53 * hash) + getBlocksBuffersList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Protobuf type {@code hadoop.hdfs.datanode.StorageBlockReportProto}
 *
 * <pre>
 **
 * Report of blocks in a storage
 * </pre>
 */
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_StorageBlockReportProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_StorageBlockReportProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getStorageFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (storageBuilder_ == null) {
storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance();
} else {
storageBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
numberOfBlocks_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
blocksBuffers_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_StorageBlockReportProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (storageBuilder_ == null) {
result.storage_ = storage_;
} else {
result.storage_ = storageBuilder_.build();
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
blocks_ = java.util.Collections.unmodifiableList(blocks_);
bitField0_ = (bitField0_ & ~0x00000002);
}
result.blocks_ = blocks_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000002;
}
result.numberOfBlocks_ = numberOfBlocks_;
if (((bitField0_ & 0x00000008) == 0x00000008)) {
blocksBuffers_ = java.util.Collections.unmodifiableList(blocksBuffers_);
bitField0_ = (bitField0_ & ~0x00000008);
}
result.blocksBuffers_ = blocksBuffers_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.getDefaultInstance()) return this;
if (other.hasStorage()) {
mergeStorage(other.getStorage());
}
if (!other.blocks_.isEmpty()) {
if (blocks_.isEmpty()) {
blocks_ = other.blocks_;
bitField0_ = (bitField0_ & ~0x00000002);
} else {
ensureBlocksIsMutable();
blocks_.addAll(other.blocks_);
}
onChanged();
}
if (other.hasNumberOfBlocks()) {
setNumberOfBlocks(other.getNumberOfBlocks());
}
if (!other.blocksBuffers_.isEmpty()) {
if (blocksBuffers_.isEmpty()) {
blocksBuffers_ = other.blocksBuffers_;
bitField0_ = (bitField0_ & ~0x00000008);
} else {
ensureBlocksBuffersIsMutable();
blocksBuffers_.addAll(other.blocksBuffers_);
}
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasStorage()) {
return false;
}
if (!getStorage().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.DatanodeStorageProto storage = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder> storageBuilder_;
/**
* required .hadoop.hdfs.DatanodeStorageProto storage = 1;
*
*
* Storage
*
*/
public boolean hasStorage() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.DatanodeStorageProto storage = 1;
*
*
* Storage
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage() {
if (storageBuilder_ == null) {
return storage_;
} else {
return storageBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.DatanodeStorageProto storage = 1;
*
*
* Storage
*
*/
public Builder setStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto value) {
if (storageBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
storage_ = value;
onChanged();
} else {
storageBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.DatanodeStorageProto storage = 1;
*
*
* Storage
*
*/
public Builder setStorage(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder builderForValue) {
if (storageBuilder_ == null) {
storage_ = builderForValue.build();
onChanged();
} else {
storageBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.DatanodeStorageProto storage = 1;
*
*
* Storage
*
*/
public Builder mergeStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto value) {
if (storageBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
storage_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance()) {
storage_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.newBuilder(storage_).mergeFrom(value).buildPartial();
} else {
storage_ = value;
}
onChanged();
} else {
storageBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.DatanodeStorageProto storage = 1;
*
*
* Storage
*
*/
public Builder clearStorage() {
if (storageBuilder_ == null) {
storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance();
onChanged();
} else {
storageBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.DatanodeStorageProto storage = 1;
*
*
* Storage
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder getStorageBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getStorageFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.DatanodeStorageProto storage = 1;
*
*
* Storage
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder() {
if (storageBuilder_ != null) {
return storageBuilder_.getMessageOrBuilder();
} else {
return storage_;
}
}
/**
* required .hadoop.hdfs.DatanodeStorageProto storage = 1;
*
*
* Storage
*
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder>
getStorageFieldBuilder() {
if (storageBuilder_ == null) {
storageBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder>(
storage_,
getParentForChildren(),
isClean());
storage_ = null;
}
return storageBuilder_;
}
// repeated uint64 blocks = 2 [packed = true];
private java.util.List<java.lang.Long> blocks_ = java.util.Collections.emptyList();
private void ensureBlocksIsMutable() {
if (!((bitField0_ & 0x00000002) == 0x00000002)) {
blocks_ = new java.util.ArrayList<java.lang.Long>(blocks_);
bitField0_ |= 0x00000002;
}
}
/**
* repeated uint64 blocks = 2 [packed = true];
*/
public java.util.List<java.lang.Long>
getBlocksList() {
return java.util.Collections.unmodifiableList(blocks_);
}
/**
* repeated uint64 blocks = 2 [packed = true];
*/
public int getBlocksCount() {
return blocks_.size();
}
/**
* repeated uint64 blocks = 2 [packed = true];
*/
public long getBlocks(int index) {
return blocks_.get(index);
}
/**
* repeated uint64 blocks = 2 [packed = true];
*/
public Builder setBlocks(
int index, long value) {
ensureBlocksIsMutable();
blocks_.set(index, value);
onChanged();
return this;
}
/**
* repeated uint64 blocks = 2 [packed = true];
*/
public Builder addBlocks(long value) {
ensureBlocksIsMutable();
blocks_.add(value);
onChanged();
return this;
}
/**
* repeated uint64 blocks = 2 [packed = true];
*/
public Builder addAllBlocks(
java.lang.Iterable<? extends java.lang.Long> values) {
ensureBlocksIsMutable();
super.addAll(values, blocks_);
onChanged();
return this;
}
/**
* repeated uint64 blocks = 2 [packed = true];
*/
public Builder clearBlocks() {
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
return this;
}
// optional uint64 numberOfBlocks = 3;
private long numberOfBlocks_ ;
/**
* optional uint64 numberOfBlocks = 3;
*/
public boolean hasNumberOfBlocks() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional uint64 numberOfBlocks = 3;
*/
public long getNumberOfBlocks() {
return numberOfBlocks_;
}
/**
* optional uint64 numberOfBlocks = 3;
*/
public Builder setNumberOfBlocks(long value) {
bitField0_ |= 0x00000004;
numberOfBlocks_ = value;
onChanged();
return this;
}
/**
* optional uint64 numberOfBlocks = 3;
*/
public Builder clearNumberOfBlocks() {
bitField0_ = (bitField0_ & ~0x00000004);
numberOfBlocks_ = 0L;
onChanged();
return this;
}
// repeated bytes blocksBuffers = 4;
private java.util.List<com.google.protobuf.ByteString> blocksBuffers_ = java.util.Collections.emptyList();
private void ensureBlocksBuffersIsMutable() {
if (!((bitField0_ & 0x00000008) == 0x00000008)) {
blocksBuffers_ = new java.util.ArrayList<com.google.protobuf.ByteString>(blocksBuffers_);
bitField0_ |= 0x00000008;
}
}
/**
* repeated bytes blocksBuffers = 4;
*/
public java.util.List<com.google.protobuf.ByteString>
getBlocksBuffersList() {
return java.util.Collections.unmodifiableList(blocksBuffers_);
}
/**
* repeated bytes blocksBuffers = 4;
*/
public int getBlocksBuffersCount() {
return blocksBuffers_.size();
}
/**
* repeated bytes blocksBuffers = 4;
*/
public com.google.protobuf.ByteString getBlocksBuffers(int index) {
return blocksBuffers_.get(index);
}
/**
* repeated bytes blocksBuffers = 4;
*/
public Builder setBlocksBuffers(
int index, com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksBuffersIsMutable();
blocksBuffers_.set(index, value);
onChanged();
return this;
}
/**
* repeated bytes blocksBuffers = 4;
*/
public Builder addBlocksBuffers(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksBuffersIsMutable();
blocksBuffers_.add(value);
onChanged();
return this;
}
/**
* repeated bytes blocksBuffers = 4;
*/
public Builder addAllBlocksBuffers(
java.lang.Iterable<? extends com.google.protobuf.ByteString> values) {
ensureBlocksBuffersIsMutable();
super.addAll(values, blocksBuffers_);
onChanged();
return this;
}
/**
* repeated bytes blocksBuffers = 4;
*/
public Builder clearBlocksBuffers() {
blocksBuffers_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000008);
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.StorageBlockReportProto)
}
static {
defaultInstance = new StorageBlockReportProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.StorageBlockReportProto)
}
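// Usage sketch added for illustration (not produced by protoc): builds a
// StorageBlockReportProto for one storage and round-trips it through its
// serialized form. The caller is assumed to supply a fully populated
// HdfsProtos.DatanodeStorageProto; all builder methods used here are the
// generated ones shown above.
static StorageBlockReportProto exampleStorageBlockReport(
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto storage)
    throws com.google.protobuf.InvalidProtocolBufferException {
  StorageBlockReportProto report = StorageBlockReportProto.newBuilder()
      .setStorage(storage)       // required field; build() throws if it is unset
      .addBlocks(1024L)          // block IDs travel as packed uint64 longs
      .addBlocks(2048L)
      .setNumberOfBlocks(2L)
      .build();
  // parseFrom() reconstructs an equal message from the wire bytes.
  return StorageBlockReportProto.parseFrom(report.toByteString());
}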
public interface BlockReportResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
boolean hasCmd();
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmd();
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdOrBuilder();
}
/**
 * Protobuf type {@code hadoop.hdfs.datanode.BlockReportResponseProto}
 *
 * <pre>
 **
 * cmd - Command from namenode to the datanode
 * </pre>
 */
public static final class BlockReportResponseProto extends
com.google.protobuf.GeneratedMessage
implements BlockReportResponseProtoOrBuilder {
// Use BlockReportResponseProto.newBuilder() to construct.
private BlockReportResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private BlockReportResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final BlockReportResponseProto defaultInstance;
public static BlockReportResponseProto getDefaultInstance() {
return defaultInstance;
}
public BlockReportResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private BlockReportResponseProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
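// Field 1 (cmd): if a command was already parsed, merge the new value into
// it so repeated occurrences of the embedded message collapse into one,
// matching standard protobuf merge semantics.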
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = cmd_.toBuilder();
}
cmd_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(cmd_);
cmd_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.Builder.class);
}
public static com.google.protobuf.Parser<BlockReportResponseProto> PARSER =
new com.google.protobuf.AbstractParser<BlockReportResponseProto>() {
public BlockReportResponseProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new BlockReportResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<BlockReportResponseProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
public static final int CMD_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto cmd_;
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
public boolean hasCmd() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmd() {
return cmd_;
}
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdOrBuilder() {
return cmd_;
}
private void initFields() {
cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (hasCmd()) {
if (!getCmd().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, cmd_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, cmd_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto) obj;
boolean result = true;
result = result && (hasCmd() == other.hasCmd());
if (hasCmd()) {
result = result && getCmd()
.equals(other.getCmd());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasCmd()) {
hash = (37 * hash) + CMD_FIELD_NUMBER;
hash = (53 * hash) + getCmd().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Protobuf type {@code hadoop.hdfs.datanode.BlockReportResponseProto}
 *
 * <pre>
 **
 * cmd - Command from namenode to the datanode
 * </pre>
 */
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getCmdFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (cmdBuilder_ == null) {
cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance();
} else {
cmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportResponseProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (cmdBuilder_ == null) {
result.cmd_ = cmd_;
} else {
result.cmd_ = cmdBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance()) return this;
if (other.hasCmd()) {
mergeCmd(other.getCmd());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (hasCmd()) {
if (!getCmd().isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder> cmdBuilder_;
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
public boolean hasCmd() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmd() {
if (cmdBuilder_ == null) {
return cmd_;
} else {
return cmdBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
public Builder setCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) {
if (cmdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
cmd_ = value;
onChanged();
} else {
cmdBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
public Builder setCmd(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder builderForValue) {
if (cmdBuilder_ == null) {
cmd_ = builderForValue.build();
onChanged();
} else {
cmdBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
public Builder mergeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) {
if (cmdBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
cmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance()) {
cmd_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.newBuilder(cmd_).mergeFrom(value).buildPartial();
} else {
cmd_ = value;
}
onChanged();
} else {
cmdBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
public Builder clearCmd() {
if (cmdBuilder_ == null) {
cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance();
onChanged();
} else {
cmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder getCmdBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getCmdFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdOrBuilder() {
if (cmdBuilder_ != null) {
return cmdBuilder_.getMessageOrBuilder();
} else {
return cmd_;
}
}
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder>
getCmdFieldBuilder() {
if (cmdBuilder_ == null) {
cmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder>(
cmd_,
getParentForChildren(),
isClean());
cmd_ = null;
}
return cmdBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.BlockReportResponseProto)
}
static {
defaultInstance = new BlockReportResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.BlockReportResponseProto)
}
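// Usage sketch added for illustration (not produced by protoc): the response
// to a block report carries at most one optional namenode command, so callers
// should test hasCmd() before reading it.
static DatanodeCommandProto exampleCommandFromResponse(BlockReportResponseProto resp) {
  if (resp.hasCmd()) {
    return resp.getCmd();   // command the namenode wants the datanode to execute
  }
  return null;              // no command was included in this response
}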
public interface CacheReportRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
boolean hasRegistration();
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration();
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder();
// required string blockPoolId = 2;
/**
* required string blockPoolId = 2;
*/
boolean hasBlockPoolId();
/**
* required string blockPoolId = 2;
*/
java.lang.String getBlockPoolId();
/**
* required string blockPoolId = 2;
*/
com.google.protobuf.ByteString
getBlockPoolIdBytes();
// repeated uint64 blocks = 3 [packed = true];
/**
* repeated uint64 blocks = 3 [packed = true];
*/
java.util.List<java.lang.Long> getBlocksList();
/**
* repeated uint64 blocks = 3 [packed = true];
*/
int getBlocksCount();
/**
* repeated uint64 blocks = 3 [packed = true];
*/
long getBlocks(int index);
}
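// Usage sketch added for illustration (not produced by protoc): assembles the
// CacheReportRequestProto defined below. `registration` is assumed to be a
// valid DatanodeRegistrationProto, and the builder setters follow the usual
// generated naming for the fields declared in the interface above.
static CacheReportRequestProto exampleCacheReport(
    DatanodeRegistrationProto registration, java.lang.String blockPoolId,
    java.util.List<java.lang.Long> cachedBlockIds) {
  return CacheReportRequestProto.newBuilder()
      .setRegistration(registration)   // required
      .setBlockPoolId(blockPoolId)     // required
      .addAllBlocks(cachedBlockIds)    // repeated uint64, packed on the wire
      .build();
}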
/**
 * Protobuf type {@code hadoop.hdfs.datanode.CacheReportRequestProto}
 *
 * <pre>
 **
 * registration - datanode registration information
 * blockPoolId - block pool ID of the reported blocks
 * blocks - representation of blocks as longs for efficiency reasons
 * </pre>
 */
public static final class CacheReportRequestProto extends
com.google.protobuf.GeneratedMessage
implements CacheReportRequestProtoOrBuilder {
// Use CacheReportRequestProto.newBuilder() to construct.
private CacheReportRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private CacheReportRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final CacheReportRequestProto defaultInstance;
public static CacheReportRequestProto getDefaultInstance() {
return defaultInstance;
}
public CacheReportRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private CacheReportRequestProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = registration_.toBuilder();
}
registration_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(registration_);
registration_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
bitField0_ |= 0x00000002;
blockPoolId_ = input.readBytes();
break;
}
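// Field 3 (blocks) may arrive unpacked (tag 24, one varint per element) or
// packed (tag 26, a length-delimited run of varints); both forms are accepted.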
case 24: {
if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
blocks_ = new java.util.ArrayList<java.lang.Long>();
mutable_bitField0_ |= 0x00000004;
}
blocks_.add(input.readUInt64());
break;
}
case 26: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
if (!((mutable_bitField0_ & 0x00000004) == 0x00000004) && input.getBytesUntilLimit() > 0) {
blocks_ = new java.util.ArrayList<java.lang.Long>();
mutable_bitField0_ |= 0x00000004;
}
while (input.getBytesUntilLimit() > 0) {
blocks_.add(input.readUInt64());
}
input.popLimit(limit);
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
blocks_ = java.util.Collections.unmodifiableList(blocks_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CacheReportRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CacheReportRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto.Builder.class);
}
public static com.google.protobuf.Parser<CacheReportRequestProto> PARSER =
new com.google.protobuf.AbstractParser<CacheReportRequestProto>() {
public CacheReportRequestProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new CacheReportRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<CacheReportRequestProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
public static final int REGISTRATION_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_;
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public boolean hasRegistration() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
return registration_;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
return registration_;
}
// required string blockPoolId = 2;
public static final int BLOCKPOOLID_FIELD_NUMBER = 2;
private java.lang.Object blockPoolId_;
/**
* required string blockPoolId = 2;
*/
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string blockPoolId = 2;
*/
public java.lang.String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
blockPoolId_ = s;
}
return s;
}
}
/**
* required string blockPoolId = 2;
*/
public com.google.protobuf.ByteString
getBlockPoolIdBytes() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPoolId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// repeated uint64 blocks = 3 [packed = true];
public static final int BLOCKS_FIELD_NUMBER = 3;
private java.util.List<java.lang.Long> blocks_;
/**
* repeated uint64 blocks = 3 [packed = true];
*/
public java.util.List<java.lang.Long>
getBlocksList() {
return blocks_;
}
/**
* repeated uint64 blocks = 3 [packed = true];
*/
public int getBlocksCount() {
return blocks_.size();
}
/**
* repeated uint64 blocks = 3 [packed = true];
*/
public long getBlocks(int index) {
return blocks_.get(index);
}
private int blocksMemoizedSerializedSize = -1;
private void initFields() {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
blockPoolId_ = "";
blocks_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasRegistration()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasBlockPoolId()) {
memoizedIsInitialized = 0;
return false;
}
if (!getRegistration().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, registration_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getBlockPoolIdBytes());
}
if (getBlocksList().size() > 0) {
output.writeRawVarint32(26);
output.writeRawVarint32(blocksMemoizedSerializedSize);
}
for (int i = 0; i < blocks_.size(); i++) {
output.writeUInt64NoTag(blocks_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, registration_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getBlockPoolIdBytes());
}
{
int dataSize = 0;
for (int i = 0; i < blocks_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeUInt64SizeNoTag(blocks_.get(i));
}
size += dataSize;
if (!getBlocksList().isEmpty()) {
size += 1;
size += com.google.protobuf.CodedOutputStream
.computeInt32SizeNoTag(dataSize);
}
blocksMemoizedSerializedSize = dataSize;
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto) obj;
boolean result = true;
result = result && (hasRegistration() == other.hasRegistration());
if (hasRegistration()) {
result = result && getRegistration()
.equals(other.getRegistration());
}
result = result && (hasBlockPoolId() == other.hasBlockPoolId());
if (hasBlockPoolId()) {
result = result && getBlockPoolId()
.equals(other.getBlockPoolId());
}
result = result && getBlocksList()
.equals(other.getBlocksList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasRegistration()) {
hash = (37 * hash) + REGISTRATION_FIELD_NUMBER;
hash = (53 * hash) + getRegistration().hashCode();
}
if (hasBlockPoolId()) {
hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
hash = (53 * hash) + getBlockPoolId().hashCode();
}
if (getBlocksCount() > 0) {
hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
hash = (53 * hash) + getBlocksList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Protobuf type {@code hadoop.hdfs.datanode.CacheReportRequestProto}
 *
 * <pre>
 **
 * registration - datanode registration information
 * blockPoolId - block pool ID of the reported blocks
 * blocks - representation of blocks as longs for efficiency reasons
 * </pre>
 */
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CacheReportRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CacheReportRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getRegistrationFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (registrationBuilder_ == null) {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
} else {
registrationBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
blockPoolId_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CacheReportRequestProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (registrationBuilder_ == null) {
result.registration_ = registration_;
} else {
result.registration_ = registrationBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.blockPoolId_ = blockPoolId_;
if (((bitField0_ & 0x00000004) == 0x00000004)) {
blocks_ = java.util.Collections.unmodifiableList(blocks_);
bitField0_ = (bitField0_ & ~0x00000004);
}
result.blocks_ = blocks_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto.getDefaultInstance()) return this;
if (other.hasRegistration()) {
mergeRegistration(other.getRegistration());
}
if (other.hasBlockPoolId()) {
bitField0_ |= 0x00000002;
blockPoolId_ = other.blockPoolId_;
onChanged();
}
if (!other.blocks_.isEmpty()) {
if (blocks_.isEmpty()) {
blocks_ = other.blocks_;
bitField0_ = (bitField0_ & ~0x00000004);
} else {
ensureBlocksIsMutable();
blocks_.addAll(other.blocks_);
}
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasRegistration()) {
return false;
}
if (!hasBlockPoolId()) {
return false;
}
if (!getRegistration().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registrationBuilder_;
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public boolean hasRegistration() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
if (registrationBuilder_ == null) {
return registration_;
} else {
return registrationBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
if (registrationBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
registration_ = value;
onChanged();
} else {
registrationBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public Builder setRegistration(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) {
if (registrationBuilder_ == null) {
registration_ = builderForValue.build();
onChanged();
} else {
registrationBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
if (registrationBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
registration_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) {
registration_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial();
} else {
registration_ = value;
}
onChanged();
} else {
registrationBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public Builder clearRegistration() {
if (registrationBuilder_ == null) {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
onChanged();
} else {
registrationBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistrationBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getRegistrationFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
if (registrationBuilder_ != null) {
return registrationBuilder_.getMessageOrBuilder();
} else {
return registration_;
}
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>
getRegistrationFieldBuilder() {
if (registrationBuilder_ == null) {
registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>(
registration_,
getParentForChildren(),
isClean());
registration_ = null;
}
return registrationBuilder_;
}
// required string blockPoolId = 2;
private java.lang.Object blockPoolId_ = "";
/**
* required string blockPoolId = 2;
*/
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string blockPoolId = 2;
*/
public java.lang.String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
blockPoolId_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string blockPoolId = 2;
*/
public com.google.protobuf.ByteString
getBlockPoolIdBytes() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPoolId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string blockPoolId = 2;
*/
public Builder setBlockPoolId(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
blockPoolId_ = value;
onChanged();
return this;
}
/**
* required string blockPoolId = 2;
*/
public Builder clearBlockPoolId() {
bitField0_ = (bitField0_ & ~0x00000002);
blockPoolId_ = getDefaultInstance().getBlockPoolId();
onChanged();
return this;
}
/**
* required string blockPoolId = 2;
*/
public Builder setBlockPoolIdBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
blockPoolId_ = value;
onChanged();
return this;
}
// repeated uint64 blocks = 3 [packed = true];
private java.util.List<java.lang.Long> blocks_ = java.util.Collections.emptyList();
private void ensureBlocksIsMutable() {
if (!((bitField0_ & 0x00000004) == 0x00000004)) {
blocks_ = new java.util.ArrayList<java.lang.Long>(blocks_);
bitField0_ |= 0x00000004;
}
}
/**
* repeated uint64 blocks = 3 [packed = true];
*/
public java.util.List<java.lang.Long>
getBlocksList() {
return java.util.Collections.unmodifiableList(blocks_);
}
/**
* repeated uint64 blocks = 3 [packed = true];
*/
public int getBlocksCount() {
return blocks_.size();
}
/**
* repeated uint64 blocks = 3 [packed = true];
*/
public long getBlocks(int index) {
return blocks_.get(index);
}
/**
* repeated uint64 blocks = 3 [packed = true];
*/
public Builder setBlocks(
int index, long value) {
ensureBlocksIsMutable();
blocks_.set(index, value);
onChanged();
return this;
}
/**
* repeated uint64 blocks = 3 [packed = true];
*/
public Builder addBlocks(long value) {
ensureBlocksIsMutable();
blocks_.add(value);
onChanged();
return this;
}
/**
* repeated uint64 blocks = 3 [packed = true];
*/
public Builder addAllBlocks(
java.lang.Iterable<? extends java.lang.Long> values) {
ensureBlocksIsMutable();
super.addAll(values, blocks_);
onChanged();
return this;
}
/**
* repeated uint64 blocks = 3 [packed = true];
*/
public Builder clearBlocks() {
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.CacheReportRequestProto)
}
static {
defaultInstance = new CacheReportRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.CacheReportRequestProto)
}
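/*
 * Illustrative sketch (hand-written, not part of the generated protocol code):
 * one way a caller might assemble the CacheReportRequestProto defined above.
 * The registration message, block pool ID and cached block IDs are assumed to
 * come from the datanode's own state.
 */
private static CacheReportRequestProto buildCacheReportRequestExample(
    DatanodeRegistrationProto registration,
    java.lang.String blockPoolId,
    long[] cachedBlockIds) {
  CacheReportRequestProto.Builder builder = CacheReportRequestProto.newBuilder()
      .setRegistration(registration)   // required field 1
      .setBlockPoolId(blockPoolId);    // required field 2
  for (long blockId : cachedBlockIds) {
    builder.addBlocks(blockId);        // repeated packed uint64, field 3
  }
  return builder.build();              // throws if a required field is unset
}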
public interface CacheReportResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
boolean hasCmd();
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmd();
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.CacheReportResponseProto}
*/
public static final class CacheReportResponseProto extends
com.google.protobuf.GeneratedMessage
implements CacheReportResponseProtoOrBuilder {
// Use CacheReportResponseProto.newBuilder() to construct.
private CacheReportResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private CacheReportResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final CacheReportResponseProto defaultInstance;
public static CacheReportResponseProto getDefaultInstance() {
return defaultInstance;
}
public CacheReportResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private CacheReportResponseProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = cmd_.toBuilder();
}
cmd_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(cmd_);
cmd_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CacheReportResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CacheReportResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto.Builder.class);
}
public static com.google.protobuf.Parser<CacheReportResponseProto> PARSER =
new com.google.protobuf.AbstractParser<CacheReportResponseProto>() {
public CacheReportResponseProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new CacheReportResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<CacheReportResponseProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
public static final int CMD_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto cmd_;
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
public boolean hasCmd() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmd() {
return cmd_;
}
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdOrBuilder() {
return cmd_;
}
private void initFields() {
cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (hasCmd()) {
if (!getCmd().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, cmd_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, cmd_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto) obj;
boolean result = true;
result = result && (hasCmd() == other.hasCmd());
if (hasCmd()) {
result = result && getCmd()
.equals(other.getCmd());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasCmd()) {
hash = (37 * hash) + CMD_FIELD_NUMBER;
hash = (53 * hash) + getCmd().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.CacheReportResponseProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CacheReportResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CacheReportResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getCmdFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (cmdBuilder_ == null) {
cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance();
} else {
cmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CacheReportResponseProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (cmdBuilder_ == null) {
result.cmd_ = cmd_;
} else {
result.cmd_ = cmdBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto.getDefaultInstance()) return this;
if (other.hasCmd()) {
mergeCmd(other.getCmd());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (hasCmd()) {
if (!getCmd().isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder> cmdBuilder_;
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
public boolean hasCmd() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmd() {
if (cmdBuilder_ == null) {
return cmd_;
} else {
return cmdBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
public Builder setCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) {
if (cmdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
cmd_ = value;
onChanged();
} else {
cmdBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
public Builder setCmd(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder builderForValue) {
if (cmdBuilder_ == null) {
cmd_ = builderForValue.build();
onChanged();
} else {
cmdBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
public Builder mergeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) {
if (cmdBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
cmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance()) {
cmd_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.newBuilder(cmd_).mergeFrom(value).buildPartial();
} else {
cmd_ = value;
}
onChanged();
} else {
cmdBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
public Builder clearCmd() {
if (cmdBuilder_ == null) {
cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance();
onChanged();
} else {
cmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder getCmdBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getCmdFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdOrBuilder() {
if (cmdBuilder_ != null) {
return cmdBuilder_.getMessageOrBuilder();
} else {
return cmd_;
}
}
/**
* optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder>
getCmdFieldBuilder() {
if (cmdBuilder_ == null) {
cmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder>(
cmd_,
getParentForChildren(),
isClean());
cmd_ = null;
}
return cmdBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.CacheReportResponseProto)
}
static {
defaultInstance = new CacheReportResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.CacheReportResponseProto)
}
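/*
 * Illustrative sketch (hand-written, not part of the generated protocol code):
 * decoding a CacheReportResponseProto from raw bytes and reading its single
 * optional field. The byte array is assumed to hold a serialized response.
 */
private static DatanodeCommandProto readCacheReportResponseExample(byte[] data)
    throws com.google.protobuf.InvalidProtocolBufferException {
  CacheReportResponseProto response = CacheReportResponseProto.parseFrom(data);
  // cmd is optional (field 1), so check presence before reading it.
  return response.hasCmd() ? response.getCmd() : null;
}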
public interface ReceivedDeletedBlockInfoProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.BlockProto block = 1;
/**
* required .hadoop.hdfs.BlockProto block = 1;
*/
boolean hasBlock();
/**
* required .hadoop.hdfs.BlockProto block = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock();
/**
* required .hadoop.hdfs.BlockProto block = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder();
// required .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto.BlockStatus status = 3;
/**
* required .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto.BlockStatus status = 3;
*/
boolean hasStatus();
/**
* required .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto.BlockStatus status = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus getStatus();
// optional string deleteHint = 2;
/**
* optional string deleteHint = 2;
*/
boolean hasDeleteHint();
/**
* optional string deleteHint = 2;
*/
java.lang.String getDeleteHint();
/**
* optional string deleteHint = 2;
*/
com.google.protobuf.ByteString
getDeleteHintBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto}
*
* <pre>
**
* Data structure to send received or deleted block information
* from datanode to namenode.
* </pre>
*/
public static final class ReceivedDeletedBlockInfoProto extends
com.google.protobuf.GeneratedMessage
implements ReceivedDeletedBlockInfoProtoOrBuilder {
// Use ReceivedDeletedBlockInfoProto.newBuilder() to construct.
private ReceivedDeletedBlockInfoProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ReceivedDeletedBlockInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ReceivedDeletedBlockInfoProto defaultInstance;
public static ReceivedDeletedBlockInfoProto getDefaultInstance() {
return defaultInstance;
}
public ReceivedDeletedBlockInfoProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ReceivedDeletedBlockInfoProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = block_.toBuilder();
}
block_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(block_);
block_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
bitField0_ |= 0x00000004;
deleteHint_ = input.readBytes();
break;
}
case 24: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus value = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(3, rawValue);
} else {
bitField0_ |= 0x00000002;
status_ = value;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ReceivedDeletedBlockInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ReceivedDeletedBlockInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder.class);
}
public static com.google.protobuf.Parser<ReceivedDeletedBlockInfoProto> PARSER =
new com.google.protobuf.AbstractParser<ReceivedDeletedBlockInfoProto>() {
public ReceivedDeletedBlockInfoProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ReceivedDeletedBlockInfoProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<ReceivedDeletedBlockInfoProto> getParserForType() {
return PARSER;
}
/**
* Protobuf enum {@code hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto.BlockStatus}
*/
public enum BlockStatus
implements com.google.protobuf.ProtocolMessageEnum {
/**
* RECEIVING = 1;
*
*
* block being created
*
*/
RECEIVING(0, 1),
/**
* RECEIVED = 2;
*
*
* block creation complete
*
*/
RECEIVED(1, 2),
/**
* DELETED = 3;
*/
DELETED(2, 3),
;
/**
* RECEIVING = 1;
*
*
* block being created
*
*/
public static final int RECEIVING_VALUE = 1;
/**
* RECEIVED = 2;
*
*
* block creation complete
*
*/
public static final int RECEIVED_VALUE = 2;
/**
* DELETED = 3;
*/
public static final int DELETED_VALUE = 3;
public final int getNumber() { return value; }
public static BlockStatus valueOf(int value) {
switch (value) {
case 1: return RECEIVING;
case 2: return RECEIVED;
case 3: return DELETED;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<BlockStatus>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<BlockStatus>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<BlockStatus>() {
public BlockStatus findValueByNumber(int number) {
return BlockStatus.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.getDescriptor().getEnumTypes().get(0);
}
private static final BlockStatus[] VALUES = values();
public static BlockStatus valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private BlockStatus(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto.BlockStatus)
}
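/*
 * Illustrative sketch (hand-written, not part of the generated protocol code):
 * BlockStatus.valueOf(int) maps the wire number (1, 2 or 3) back to an enum
 * constant and returns null for numbers this build does not know, which the
 * parser above then records as an unknown field. Falling back to RECEIVING
 * here is only an assumption made for the sketch.
 */
private static BlockStatus decodeBlockStatusExample(int rawWireValue) {
  BlockStatus status = BlockStatus.valueOf(rawWireValue);
  return (status != null) ? status : BlockStatus.RECEIVING;
}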
private int bitField0_;
// required .hadoop.hdfs.BlockProto block = 1;
public static final int BLOCK_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto block_;
/**
* required .hadoop.hdfs.BlockProto block = 1;
*/
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.BlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock() {
return block_;
}
/**
* required .hadoop.hdfs.BlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder() {
return block_;
}
// required .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto.BlockStatus status = 3;
public static final int STATUS_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus status_;
/**
* required .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto.BlockStatus status = 3;
*/
public boolean hasStatus() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto.BlockStatus status = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus getStatus() {
return status_;
}
// optional string deleteHint = 2;
public static final int DELETEHINT_FIELD_NUMBER = 2;
private java.lang.Object deleteHint_;
/**
* optional string deleteHint = 2;
*/
public boolean hasDeleteHint() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional string deleteHint = 2;
*/
public java.lang.String getDeleteHint() {
java.lang.Object ref = deleteHint_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
deleteHint_ = s;
}
return s;
}
}
/**
* optional string deleteHint = 2;
*/
public com.google.protobuf.ByteString
getDeleteHintBytes() {
java.lang.Object ref = deleteHint_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
deleteHint_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
status_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus.RECEIVING;
deleteHint_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasBlock()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasStatus()) {
memoizedIsInitialized = 0;
return false;
}
if (!getBlock().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, block_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(2, getDeleteHintBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeEnum(3, status_.getNumber());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, block_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getDeleteHintBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(3, status_.getNumber());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto) obj;
boolean result = true;
result = result && (hasBlock() == other.hasBlock());
if (hasBlock()) {
result = result && getBlock()
.equals(other.getBlock());
}
result = result && (hasStatus() == other.hasStatus());
if (hasStatus()) {
result = result &&
(getStatus() == other.getStatus());
}
result = result && (hasDeleteHint() == other.hasDeleteHint());
if (hasDeleteHint()) {
result = result && getDeleteHint()
.equals(other.getDeleteHint());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasBlock()) {
hash = (37 * hash) + BLOCK_FIELD_NUMBER;
hash = (53 * hash) + getBlock().hashCode();
}
if (hasStatus()) {
hash = (37 * hash) + STATUS_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getStatus());
}
if (hasDeleteHint()) {
hash = (37 * hash) + DELETEHINT_FIELD_NUMBER;
hash = (53 * hash) + getDeleteHint().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto}
*
* <pre>
**
* Data structure to send received or deleted block information
* from datanode to namenode.
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ReceivedDeletedBlockInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ReceivedDeletedBlockInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getBlockFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (blockBuilder_ == null) {
block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
status_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus.RECEIVING;
bitField0_ = (bitField0_ & ~0x00000002);
deleteHint_ = "";
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ReceivedDeletedBlockInfoProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (blockBuilder_ == null) {
result.block_ = block_;
} else {
result.block_ = blockBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.status_ = status_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.deleteHint_ = deleteHint_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.getDefaultInstance()) return this;
if (other.hasBlock()) {
mergeBlock(other.getBlock());
}
if (other.hasStatus()) {
setStatus(other.getStatus());
}
if (other.hasDeleteHint()) {
bitField0_ |= 0x00000004;
deleteHint_ = other.deleteHint_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasBlock()) {
return false;
}
if (!hasStatus()) {
return false;
}
if (!getBlock().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.BlockProto block = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> blockBuilder_;
/**
* required .hadoop.hdfs.BlockProto block = 1;
*/
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.BlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock() {
if (blockBuilder_ == null) {
return block_;
} else {
return blockBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.BlockProto block = 1;
*/
public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
if (blockBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
block_ = value;
onChanged();
} else {
blockBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BlockProto block = 1;
*/
public Builder setBlock(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
if (blockBuilder_ == null) {
block_ = builderForValue.build();
onChanged();
} else {
blockBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BlockProto block = 1;
*/
public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
if (blockBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) {
block_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
} else {
block_ = value;
}
onChanged();
} else {
blockBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BlockProto block = 1;
*/
public Builder clearBlock() {
if (blockBuilder_ == null) {
block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
onChanged();
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.BlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getBlockBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getBlockFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.BlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder() {
if (blockBuilder_ != null) {
return blockBuilder_.getMessageOrBuilder();
} else {
return block_;
}
}
/**
* required .hadoop.hdfs.BlockProto block = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>
getBlockFieldBuilder() {
if (blockBuilder_ == null) {
blockBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>(
block_,
getParentForChildren(),
isClean());
block_ = null;
}
return blockBuilder_;
}
// required .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto.BlockStatus status = 3;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus status_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus.RECEIVING;
/**
* required .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto.BlockStatus status = 3;
*/
public boolean hasStatus() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto.BlockStatus status = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus getStatus() {
return status_;
}
/**
* required .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto.BlockStatus status = 3;
*/
public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
status_ = value;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto.BlockStatus status = 3;
*/
public Builder clearStatus() {
bitField0_ = (bitField0_ & ~0x00000002);
status_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus.RECEIVING;
onChanged();
return this;
}
// optional string deleteHint = 2;
private java.lang.Object deleteHint_ = "";
/**
* optional string deleteHint = 2;
*/
public boolean hasDeleteHint() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional string deleteHint = 2;
*/
public java.lang.String getDeleteHint() {
java.lang.Object ref = deleteHint_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
deleteHint_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string deleteHint = 2;
*/
public com.google.protobuf.ByteString
getDeleteHintBytes() {
java.lang.Object ref = deleteHint_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
deleteHint_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* optional string deleteHint = 2;
*/
public Builder setDeleteHint(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
deleteHint_ = value;
onChanged();
return this;
}
/**
* optional string deleteHint = 2;
*/
public Builder clearDeleteHint() {
bitField0_ = (bitField0_ & ~0x00000004);
deleteHint_ = getDefaultInstance().getDeleteHint();
onChanged();
return this;
}
/**
* optional string deleteHint = 2;
*/
public Builder setDeleteHintBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
deleteHint_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto)
}
static {
defaultInstance = new ReceivedDeletedBlockInfoProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto)
}
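// Illustrative usage sketch (not part of the generated code): a datanode-side caller
// would normally populate ReceivedDeletedBlockInfoProto through the builder methods
// defined above. The BlockProto setter names (setBlockId, setGenStamp, setNumBytes)
// and the example values are assumptions based on hdfs.proto, not taken from this file.
//
//   ReceivedDeletedBlockInfoProto info = ReceivedDeletedBlockInfoProto.newBuilder()
//       .setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder()
//           .setBlockId(1073741825L)
//           .setGenStamp(1001L)
//           .setNumBytes(0L))
//       .setStatus(ReceivedDeletedBlockInfoProto.BlockStatus.RECEIVING)
//       .setDeleteHint("")   // optional field; may be omitted
//       .build();            // block and status are required, so build() checks them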
public interface StorageReceivedDeletedBlocksProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required string storageUuid = 1 [deprecated = true];
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated boolean hasStorageUuid();
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated java.lang.String getStorageUuid();
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated com.google.protobuf.ByteString
getStorageUuidBytes();
// repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
/**
* repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto>
getBlocksList();
/**
* repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto getBlocks(int index);
/**
* repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
*/
int getBlocksCount();
/**
* repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder>
getBlocksOrBuilderList();
/**
* repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder getBlocksOrBuilder(
int index);
// optional .hadoop.hdfs.DatanodeStorageProto storage = 3;
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 3;
*
*
* supersedes storageUuid.
*
*/
boolean hasStorage();
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 3;
*
*
* supersedes storageUuid.
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage();
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 3;
*
*
* supersedes storageUuid.
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto}
*
*
**
* List of blocks received and deleted for a storage.
*
*/
public static final class StorageReceivedDeletedBlocksProto extends
com.google.protobuf.GeneratedMessage
implements StorageReceivedDeletedBlocksProtoOrBuilder {
// Use StorageReceivedDeletedBlocksProto.newBuilder() to construct.
private StorageReceivedDeletedBlocksProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private StorageReceivedDeletedBlocksProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final StorageReceivedDeletedBlocksProto defaultInstance;
public static StorageReceivedDeletedBlocksProto getDefaultInstance() {
return defaultInstance;
}
public StorageReceivedDeletedBlocksProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private StorageReceivedDeletedBlocksProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
storageUuid_ = input.readBytes();
break;
}
case 18: {
if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto>();
mutable_bitField0_ |= 0x00000002;
}
blocks_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.PARSER, extensionRegistry));
break;
}
case 26: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
subBuilder = storage_.toBuilder();
}
storage_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(storage_);
storage_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
blocks_ = java.util.Collections.unmodifiableList(blocks_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_StorageReceivedDeletedBlocksProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_StorageReceivedDeletedBlocksProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder.class);
}
public static com.google.protobuf.Parser<StorageReceivedDeletedBlocksProto> PARSER =
new com.google.protobuf.AbstractParser<StorageReceivedDeletedBlocksProto>() {
public StorageReceivedDeletedBlocksProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new StorageReceivedDeletedBlocksProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<StorageReceivedDeletedBlocksProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required string storageUuid = 1 [deprecated = true];
public static final int STORAGEUUID_FIELD_NUMBER = 1;
private java.lang.Object storageUuid_;
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated public boolean hasStorageUuid() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated public java.lang.String getStorageUuid() {
java.lang.Object ref = storageUuid_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
storageUuid_ = s;
}
return s;
}
}
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated public com.google.protobuf.ByteString
getStorageUuidBytes() {
java.lang.Object ref = storageUuid_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
storageUuid_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
public static final int BLOCKS_FIELD_NUMBER = 2;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto> blocks_;
/**
* repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto> getBlocksList() {
return blocks_;
}
/**
* repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder>
getBlocksOrBuilderList() {
return blocks_;
}
/**
* repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
*/
public int getBlocksCount() {
return blocks_.size();
}
/**
* repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto getBlocks(int index) {
return blocks_.get(index);
}
/**
* repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder getBlocksOrBuilder(
int index) {
return blocks_.get(index);
}
// optional .hadoop.hdfs.DatanodeStorageProto storage = 3;
public static final int STORAGE_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto storage_;
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 3;
*
*
* supersedes storageUuid.
*
*/
public boolean hasStorage() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 3;
*
*
* supersedes storageUuid.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage() {
return storage_;
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 3;
*
*
* supersedes storageUuid.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder() {
return storage_;
}
private void initFields() {
storageUuid_ = "";
blocks_ = java.util.Collections.emptyList();
storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasStorageUuid()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getBlocksCount(); i++) {
if (!getBlocks(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasStorage()) {
if (!getStorage().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getStorageUuidBytes());
}
for (int i = 0; i < blocks_.size(); i++) {
output.writeMessage(2, blocks_.get(i));
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeMessage(3, storage_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getStorageUuidBytes());
}
for (int i = 0; i < blocks_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, blocks_.get(i));
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, storage_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto) obj;
boolean result = true;
result = result && (hasStorageUuid() == other.hasStorageUuid());
if (hasStorageUuid()) {
result = result && getStorageUuid()
.equals(other.getStorageUuid());
}
result = result && getBlocksList()
.equals(other.getBlocksList());
result = result && (hasStorage() == other.hasStorage());
if (hasStorage()) {
result = result && getStorage()
.equals(other.getStorage());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasStorageUuid()) {
hash = (37 * hash) + STORAGEUUID_FIELD_NUMBER;
hash = (53 * hash) + getStorageUuid().hashCode();
}
if (getBlocksCount() > 0) {
hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
hash = (53 * hash) + getBlocksList().hashCode();
}
if (hasStorage()) {
hash = (37 * hash) + STORAGE_FIELD_NUMBER;
hash = (53 * hash) + getStorage().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto}
*
*
**
* List of blocks received and deleted for a storage.
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_StorageReceivedDeletedBlocksProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_StorageReceivedDeletedBlocksProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getBlocksFieldBuilder();
getStorageFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
storageUuid_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
if (blocksBuilder_ == null) {
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
} else {
blocksBuilder_.clear();
}
if (storageBuilder_ == null) {
storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance();
} else {
storageBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_StorageReceivedDeletedBlocksProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.storageUuid_ = storageUuid_;
if (blocksBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002)) {
blocks_ = java.util.Collections.unmodifiableList(blocks_);
bitField0_ = (bitField0_ & ~0x00000002);
}
result.blocks_ = blocks_;
} else {
result.blocks_ = blocksBuilder_.build();
}
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000002;
}
if (storageBuilder_ == null) {
result.storage_ = storage_;
} else {
result.storage_ = storageBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.getDefaultInstance()) return this;
if (other.hasStorageUuid()) {
bitField0_ |= 0x00000001;
storageUuid_ = other.storageUuid_;
onChanged();
}
if (blocksBuilder_ == null) {
if (!other.blocks_.isEmpty()) {
if (blocks_.isEmpty()) {
blocks_ = other.blocks_;
bitField0_ = (bitField0_ & ~0x00000002);
} else {
ensureBlocksIsMutable();
blocks_.addAll(other.blocks_);
}
onChanged();
}
} else {
if (!other.blocks_.isEmpty()) {
if (blocksBuilder_.isEmpty()) {
blocksBuilder_.dispose();
blocksBuilder_ = null;
blocks_ = other.blocks_;
bitField0_ = (bitField0_ & ~0x00000002);
blocksBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getBlocksFieldBuilder() : null;
} else {
blocksBuilder_.addAllMessages(other.blocks_);
}
}
}
if (other.hasStorage()) {
mergeStorage(other.getStorage());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasStorageUuid()) {
return false;
}
for (int i = 0; i < getBlocksCount(); i++) {
if (!getBlocks(i).isInitialized()) {
return false;
}
}
if (hasStorage()) {
if (!getStorage().isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string storageUuid = 1 [deprecated = true];
private java.lang.Object storageUuid_ = "";
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated public boolean hasStorageUuid() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated public java.lang.String getStorageUuid() {
java.lang.Object ref = storageUuid_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
storageUuid_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated public com.google.protobuf.ByteString
getStorageUuidBytes() {
java.lang.Object ref = storageUuid_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
storageUuid_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated public Builder setStorageUuid(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
storageUuid_ = value;
onChanged();
return this;
}
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated public Builder clearStorageUuid() {
bitField0_ = (bitField0_ & ~0x00000001);
storageUuid_ = getDefaultInstance().getStorageUuid();
onChanged();
return this;
}
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated public Builder setStorageUuidBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
storageUuid_ = value;
onChanged();
return this;
}
// repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto> blocks_ =
java.util.Collections.emptyList();
private void ensureBlocksIsMutable() {
if (!((bitField0_ & 0x00000002) == 0x00000002)) {
blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto>(blocks_);
bitField0_ |= 0x00000002;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder> blocksBuilder_;
/**
* repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto> getBlocksList() {
if (blocksBuilder_ == null) {
return java.util.Collections.unmodifiableList(blocks_);
} else {
return blocksBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
*/
public int getBlocksCount() {
if (blocksBuilder_ == null) {
return blocks_.size();
} else {
return blocksBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto getBlocks(int index) {
if (blocksBuilder_ == null) {
return blocks_.get(index);
} else {
return blocksBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
*/
public Builder setBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.set(index, value);
onChanged();
} else {
blocksBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
*/
public Builder setBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.set(index, builderForValue.build());
onChanged();
} else {
blocksBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
*/
public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.add(value);
onChanged();
} else {
blocksBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
*/
public Builder addBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.add(index, value);
onChanged();
} else {
blocksBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
*/
public Builder addBlocks(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.add(builderForValue.build());
onChanged();
} else {
blocksBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
*/
public Builder addBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.add(index, builderForValue.build());
onChanged();
} else {
blocksBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
*/
public Builder addAllBlocks(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto> values) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
super.addAll(values, blocks_);
onChanged();
} else {
blocksBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
*/
public Builder clearBlocks() {
if (blocksBuilder_ == null) {
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
} else {
blocksBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
*/
public Builder removeBlocks(int index) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.remove(index);
onChanged();
} else {
blocksBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder getBlocksBuilder(
int index) {
return getBlocksFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder getBlocksOrBuilder(
int index) {
if (blocksBuilder_ == null) {
return blocks_.get(index); } else {
return blocksBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder>
getBlocksOrBuilderList() {
if (blocksBuilder_ != null) {
return blocksBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(blocks_);
}
}
/**
* repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder addBlocksBuilder() {
return getBlocksFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder addBlocksBuilder(
int index) {
return getBlocksFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder>
getBlocksBuilderList() {
return getBlocksFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder>
getBlocksFieldBuilder() {
if (blocksBuilder_ == null) {
blocksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder>(
blocks_,
((bitField0_ & 0x00000002) == 0x00000002),
getParentForChildren(),
isClean());
blocks_ = null;
}
return blocksBuilder_;
}
// optional .hadoop.hdfs.DatanodeStorageProto storage = 3;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder> storageBuilder_;
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 3;
*
*
* supersedes storageUuid.
*
*/
public boolean hasStorage() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 3;
*
*
* supersedes storageUuid.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage() {
if (storageBuilder_ == null) {
return storage_;
} else {
return storageBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 3;
*
*
* supersedes storageUuid.
*
*/
public Builder setStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto value) {
if (storageBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
storage_ = value;
onChanged();
} else {
storageBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 3;
*
*
* supersedes storageUuid.
*
*/
public Builder setStorage(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder builderForValue) {
if (storageBuilder_ == null) {
storage_ = builderForValue.build();
onChanged();
} else {
storageBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 3;
*
*
* supersedes storageUuid.
*
*/
public Builder mergeStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto value) {
if (storageBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004) &&
storage_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance()) {
storage_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.newBuilder(storage_).mergeFrom(value).buildPartial();
} else {
storage_ = value;
}
onChanged();
} else {
storageBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 3;
*
*
* supersedes storageUuid.
*
*/
public Builder clearStorage() {
if (storageBuilder_ == null) {
storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance();
onChanged();
} else {
storageBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 3;
*
*
* supersedes storageUuid.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder getStorageBuilder() {
bitField0_ |= 0x00000004;
onChanged();
return getStorageFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 3;
*
*
* supersedes storageUuid.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder() {
if (storageBuilder_ != null) {
return storageBuilder_.getMessageOrBuilder();
} else {
return storage_;
}
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 3;
*
*
* supersedes storageUuid.
*
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder>
getStorageFieldBuilder() {
if (storageBuilder_ == null) {
storageBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder>(
storage_,
getParentForChildren(),
isClean());
storage_ = null;
}
return storageBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto)
}
static {
defaultInstance = new StorageReceivedDeletedBlocksProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto)
}
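// Illustrative usage sketch (not part of the generated code): callers typically set the
// newer "storage" field in addition to the still-required, deprecated storageUuid string.
// The DatanodeStorageProto setter name (setStorageUuid) is an assumption based on
// hdfs.proto, and the example values are hypothetical.
//
//   StorageReceivedDeletedBlocksProto perStorage = StorageReceivedDeletedBlocksProto.newBuilder()
//       .setStorageUuid("DS-f6bfea22")   // required by the proto even though deprecated
//       .setStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto
//           .newBuilder().setStorageUuid("DS-f6bfea22"))
//       .addBlocks(info)                 // a ReceivedDeletedBlockInfoProto built as sketched earlier
//       .build();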
public interface BlockReceivedAndDeletedRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
boolean hasRegistration();
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration();
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder();
// required string blockPoolId = 2;
/**
* required string blockPoolId = 2;
*/
boolean hasBlockPoolId();
/**
* required string blockPoolId = 2;
*/
java.lang.String getBlockPoolId();
/**
* required string blockPoolId = 2;
*/
com.google.protobuf.ByteString
getBlockPoolIdBytes();
// repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
/**
* repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto>
getBlocksList();
/**
* repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto getBlocks(int index);
/**
* repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
*/
int getBlocksCount();
/**
* repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder>
getBlocksOrBuilderList();
/**
* repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder getBlocksOrBuilder(
int index);
}
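// Illustrative usage sketch (not part of the generated code): the request sent to the
// namenode combines a previously built DatanodeRegistrationProto with one per-storage
// report per storage. The builder setters referenced here (setRegistration, setBlockPoolId,
// addBlocks) are assumed from the field declarations below, and the block pool ID shown
// is a hypothetical example value.
//
//   BlockReceivedAndDeletedRequestProto request = BlockReceivedAndDeletedRequestProto.newBuilder()
//       .setRegistration(registration)   // an existing DatanodeRegistrationProto
//       .setBlockPoolId("BP-1-127.0.0.1-1")
//       .addBlocks(perStorage)           // one StorageReceivedDeletedBlocksProto per storage
//       .build();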
/**
* Protobuf type {@code hadoop.hdfs.datanode.BlockReceivedAndDeletedRequestProto}
*
*
**
* registration - datanode registration information
* blockPoolID - block pool ID of the reported blocks
* blocks - Received/deleted block list
*
*/
public static final class BlockReceivedAndDeletedRequestProto extends
com.google.protobuf.GeneratedMessage
implements BlockReceivedAndDeletedRequestProtoOrBuilder {
// Use BlockReceivedAndDeletedRequestProto.newBuilder() to construct.
private BlockReceivedAndDeletedRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private BlockReceivedAndDeletedRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final BlockReceivedAndDeletedRequestProto defaultInstance;
public static BlockReceivedAndDeletedRequestProto getDefaultInstance() {
return defaultInstance;
}
public BlockReceivedAndDeletedRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private BlockReceivedAndDeletedRequestProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = registration_.toBuilder();
}
registration_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(registration_);
registration_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
bitField0_ |= 0x00000002;
blockPoolId_ = input.readBytes();
break;
}
case 26: {
if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto>();
mutable_bitField0_ |= 0x00000004;
}
blocks_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.PARSER, extensionRegistry));
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
blocks_ = java.util.Collections.unmodifiableList(blocks_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.Builder.class);
}
public static com.google.protobuf.Parser<BlockReceivedAndDeletedRequestProto> PARSER =
new com.google.protobuf.AbstractParser<BlockReceivedAndDeletedRequestProto>() {
public BlockReceivedAndDeletedRequestProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new BlockReceivedAndDeletedRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<BlockReceivedAndDeletedRequestProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
public static final int REGISTRATION_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_;
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public boolean hasRegistration() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
return registration_;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
return registration_;
}
// required string blockPoolId = 2;
public static final int BLOCKPOOLID_FIELD_NUMBER = 2;
private java.lang.Object blockPoolId_;
/**
* required string blockPoolId = 2;
*/
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string blockPoolId = 2;
*/
public java.lang.String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
blockPoolId_ = s;
}
return s;
}
}
/**
* required string blockPoolId = 2;
*/
public com.google.protobuf.ByteString
getBlockPoolIdBytes() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPoolId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
public static final int BLOCKS_FIELD_NUMBER = 3;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto> blocks_;
/**
* repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto> getBlocksList() {
return blocks_;
}
/**
* repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder>
getBlocksOrBuilderList() {
return blocks_;
}
/**
* repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
*/
public int getBlocksCount() {
return blocks_.size();
}
/**
* repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto getBlocks(int index) {
return blocks_.get(index);
}
/**
* repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder getBlocksOrBuilder(
int index) {
return blocks_.get(index);
}
private void initFields() {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
blockPoolId_ = "";
blocks_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasRegistration()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasBlockPoolId()) {
memoizedIsInitialized = 0;
return false;
}
if (!getRegistration().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getBlocksCount(); i++) {
if (!getBlocks(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, registration_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getBlockPoolIdBytes());
}
for (int i = 0; i < blocks_.size(); i++) {
output.writeMessage(3, blocks_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, registration_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getBlockPoolIdBytes());
}
for (int i = 0; i < blocks_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, blocks_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto) obj;
boolean result = true;
result = result && (hasRegistration() == other.hasRegistration());
if (hasRegistration()) {
result = result && getRegistration()
.equals(other.getRegistration());
}
result = result && (hasBlockPoolId() == other.hasBlockPoolId());
if (hasBlockPoolId()) {
result = result && getBlockPoolId()
.equals(other.getBlockPoolId());
}
result = result && getBlocksList()
.equals(other.getBlocksList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasRegistration()) {
hash = (37 * hash) + REGISTRATION_FIELD_NUMBER;
hash = (53 * hash) + getRegistration().hashCode();
}
if (hasBlockPoolId()) {
hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
hash = (53 * hash) + getBlockPoolId().hashCode();
}
if (getBlocksCount() > 0) {
hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
hash = (53 * hash) + getBlocksList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.BlockReceivedAndDeletedRequestProto}
*
*
**
* registration - datanode registration information
* blockPoolID - block pool ID of the reported blocks
* blocks - Received/deleted block list
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getRegistrationFieldBuilder();
getBlocksFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (registrationBuilder_ == null) {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
} else {
registrationBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
blockPoolId_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
if (blocksBuilder_ == null) {
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
} else {
blocksBuilder_.clear();
}
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedRequestProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (registrationBuilder_ == null) {
result.registration_ = registration_;
} else {
result.registration_ = registrationBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.blockPoolId_ = blockPoolId_;
if (blocksBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004)) {
blocks_ = java.util.Collections.unmodifiableList(blocks_);
bitField0_ = (bitField0_ & ~0x00000004);
}
result.blocks_ = blocks_;
} else {
result.blocks_ = blocksBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.getDefaultInstance()) return this;
if (other.hasRegistration()) {
mergeRegistration(other.getRegistration());
}
if (other.hasBlockPoolId()) {
bitField0_ |= 0x00000002;
blockPoolId_ = other.blockPoolId_;
onChanged();
}
if (blocksBuilder_ == null) {
if (!other.blocks_.isEmpty()) {
if (blocks_.isEmpty()) {
blocks_ = other.blocks_;
bitField0_ = (bitField0_ & ~0x00000004);
} else {
ensureBlocksIsMutable();
blocks_.addAll(other.blocks_);
}
onChanged();
}
} else {
if (!other.blocks_.isEmpty()) {
if (blocksBuilder_.isEmpty()) {
blocksBuilder_.dispose();
blocksBuilder_ = null;
blocks_ = other.blocks_;
bitField0_ = (bitField0_ & ~0x00000004);
blocksBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getBlocksFieldBuilder() : null;
} else {
blocksBuilder_.addAllMessages(other.blocks_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasRegistration()) {
return false;
}
if (!hasBlockPoolId()) {
return false;
}
if (!getRegistration().isInitialized()) {
return false;
}
for (int i = 0; i < getBlocksCount(); i++) {
if (!getBlocks(i).isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registrationBuilder_;
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public boolean hasRegistration() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
if (registrationBuilder_ == null) {
return registration_;
} else {
return registrationBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
if (registrationBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
registration_ = value;
onChanged();
} else {
registrationBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public Builder setRegistration(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) {
if (registrationBuilder_ == null) {
registration_ = builderForValue.build();
onChanged();
} else {
registrationBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
if (registrationBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
registration_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) {
registration_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial();
} else {
registration_ = value;
}
onChanged();
} else {
registrationBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public Builder clearRegistration() {
if (registrationBuilder_ == null) {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
onChanged();
} else {
registrationBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistrationBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getRegistrationFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
if (registrationBuilder_ != null) {
return registrationBuilder_.getMessageOrBuilder();
} else {
return registration_;
}
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>
getRegistrationFieldBuilder() {
if (registrationBuilder_ == null) {
registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>(
registration_,
getParentForChildren(),
isClean());
registration_ = null;
}
return registrationBuilder_;
}
// required string blockPoolId = 2;
private java.lang.Object blockPoolId_ = "";
/**
* required string blockPoolId = 2;
*/
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string blockPoolId = 2;
*/
public java.lang.String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
blockPoolId_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string blockPoolId = 2;
*/
public com.google.protobuf.ByteString
getBlockPoolIdBytes() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPoolId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string blockPoolId = 2;
*/
public Builder setBlockPoolId(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
blockPoolId_ = value;
onChanged();
return this;
}
/**
* required string blockPoolId = 2;
*/
public Builder clearBlockPoolId() {
bitField0_ = (bitField0_ & ~0x00000002);
blockPoolId_ = getDefaultInstance().getBlockPoolId();
onChanged();
return this;
}
/**
* required string blockPoolId = 2;
*/
public Builder setBlockPoolIdBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
blockPoolId_ = value;
onChanged();
return this;
}
// repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto> blocks_ =
java.util.Collections.emptyList();
private void ensureBlocksIsMutable() {
if (!((bitField0_ & 0x00000004) == 0x00000004)) {
blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto>(blocks_);
bitField0_ |= 0x00000004;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder> blocksBuilder_;
/**
* repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto> getBlocksList() {
if (blocksBuilder_ == null) {
return java.util.Collections.unmodifiableList(blocks_);
} else {
return blocksBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
*/
public int getBlocksCount() {
if (blocksBuilder_ == null) {
return blocks_.size();
} else {
return blocksBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto getBlocks(int index) {
if (blocksBuilder_ == null) {
return blocks_.get(index);
} else {
return blocksBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
*/
public Builder setBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.set(index, value);
onChanged();
} else {
blocksBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
*/
public Builder setBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.set(index, builderForValue.build());
onChanged();
} else {
blocksBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
*/
public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.add(value);
onChanged();
} else {
blocksBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
*/
public Builder addBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.add(index, value);
onChanged();
} else {
blocksBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
*/
public Builder addBlocks(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.add(builderForValue.build());
onChanged();
} else {
blocksBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
*/
public Builder addBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.add(index, builderForValue.build());
onChanged();
} else {
blocksBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
*/
public Builder addAllBlocks(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto> values) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
super.addAll(values, blocks_);
onChanged();
} else {
blocksBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
*/
public Builder clearBlocks() {
if (blocksBuilder_ == null) {
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
} else {
blocksBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
*/
public Builder removeBlocks(int index) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.remove(index);
onChanged();
} else {
blocksBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder getBlocksBuilder(
int index) {
return getBlocksFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder getBlocksOrBuilder(
int index) {
if (blocksBuilder_ == null) {
return blocks_.get(index); } else {
return blocksBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder>
getBlocksOrBuilderList() {
if (blocksBuilder_ != null) {
return blocksBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(blocks_);
}
}
/**
* repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder addBlocksBuilder() {
return getBlocksFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder addBlocksBuilder(
int index) {
return getBlocksFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder>
getBlocksBuilderList() {
return getBlocksFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder>
getBlocksFieldBuilder() {
if (blocksBuilder_ == null) {
blocksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder>(
blocks_,
((bitField0_ & 0x00000004) == 0x00000004),
getParentForChildren(),
isClean());
blocks_ = null;
}
return blocksBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.BlockReceivedAndDeletedRequestProto)
}
static {
defaultInstance = new BlockReceivedAndDeletedRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.BlockReceivedAndDeletedRequestProto)
}
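// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical, not generated code): one way a caller
// might assemble the request message defined above through its builder API.
// The variables reg and storageReport stand in for a previously obtained
// DatanodeRegistrationProto and a fully populated
// StorageReceivedDeletedBlocksProto; the block pool ID string is made up.
// build() throws if any required field is still unset.
//
//   DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto request =
//       DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.newBuilder()
//           .setRegistration(reg)                        // required field 1
//           .setBlockPoolId("BP-1234567890-127.0.0.1-1") // required field 2
//           .addBlocks(storageReport)                    // repeated field 3
//           .build();
//   byte[] wire = request.toByteArray();                 // serialize for the RPC layer
//   DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto roundTripped =
//       DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.parseFrom(wire);
// ---------------------------------------------------------------------------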
public interface BlockReceivedAndDeletedResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.BlockReceivedAndDeletedResponseProto}
*
*
**
* void response
*
*/
public static final class BlockReceivedAndDeletedResponseProto extends
com.google.protobuf.GeneratedMessage
implements BlockReceivedAndDeletedResponseProtoOrBuilder {
// Use BlockReceivedAndDeletedResponseProto.newBuilder() to construct.
private BlockReceivedAndDeletedResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private BlockReceivedAndDeletedResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final BlockReceivedAndDeletedResponseProto defaultInstance;
public static BlockReceivedAndDeletedResponseProto getDefaultInstance() {
return defaultInstance;
}
public BlockReceivedAndDeletedResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private BlockReceivedAndDeletedResponseProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.Builder.class);
}
public static com.google.protobuf.Parser<BlockReceivedAndDeletedResponseProto> PARSER =
new com.google.protobuf.AbstractParser<BlockReceivedAndDeletedResponseProto>() {
public BlockReceivedAndDeletedResponseProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new BlockReceivedAndDeletedResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<BlockReceivedAndDeletedResponseProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.BlockReceivedAndDeletedResponseProto}
*
*
**
* void response
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedResponseProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.BlockReceivedAndDeletedResponseProto)
}
static {
defaultInstance = new BlockReceivedAndDeletedResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.BlockReceivedAndDeletedResponseProto)
}
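// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical usage): the response message above
// declares no fields, so a server can reply with a freshly built empty
// instance and the client simply parses it back; the actual RPC wiring lives
// outside this file.
//
//   DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto response =
//       DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.newBuilder()
//           .build();                          // nothing to set
//   byte[] bytes = response.toByteArray();     // zero bytes when no unknown fields are present
//   DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parsed =
//       DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.parseFrom(bytes);
// ---------------------------------------------------------------------------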
public interface ErrorReportRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;
*
*
* Registartion info
*
*/
boolean hasRegistartion();
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;
*
*
* Registartion info
*
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistartion();
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;
*
*
* Registartion info
*
*/
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistartionOrBuilder();
// required uint32 errorCode = 2;
/**
* required uint32 errorCode = 2;
*
*
* Error code
*
*/
boolean hasErrorCode();
/**
* required uint32 errorCode = 2;
*
*
* Error code
*
*/
int getErrorCode();
// required string msg = 3;
/**
* required string msg = 3;
*
*
* Error message
*
*/
boolean hasMsg();
/**
* required string msg = 3;
*
*
* Error message
*
*/
java.lang.String getMsg();
/**
* required string msg = 3;
*
*
* Error message
*
*/
com.google.protobuf.ByteString
getMsgBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.ErrorReportRequestProto}
*
*
**
* registartion - Datanode reporting the error
* errorCode - error code indicating the error
* msg - Free text description of the error
*
*/
public static final class ErrorReportRequestProto extends
com.google.protobuf.GeneratedMessage
implements ErrorReportRequestProtoOrBuilder {
// Use ErrorReportRequestProto.newBuilder() to construct.
private ErrorReportRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ErrorReportRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ErrorReportRequestProto defaultInstance;
public static ErrorReportRequestProto getDefaultInstance() {
return defaultInstance;
}
public ErrorReportRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ErrorReportRequestProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = registartion_.toBuilder();
}
registartion_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(registartion_);
registartion_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 16: {
bitField0_ |= 0x00000002;
errorCode_ = input.readUInt32();
break;
}
case 26: {
bitField0_ |= 0x00000004;
msg_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ErrorReportRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ErrorReportRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.Builder.class);
}
public static com.google.protobuf.Parser<ErrorReportRequestProto> PARSER =
new com.google.protobuf.AbstractParser<ErrorReportRequestProto>() {
public ErrorReportRequestProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ErrorReportRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<ErrorReportRequestProto> getParserForType() {
return PARSER;
}
/**
* Protobuf enum {@code hadoop.hdfs.datanode.ErrorReportRequestProto.ErrorCode}
*/
public enum ErrorCode
implements com.google.protobuf.ProtocolMessageEnum {
/**
* NOTIFY = 0;
*
*
* Error report to be logged at the namenode
*
*/
NOTIFY(0, 0),
/**
* DISK_ERROR = 1;
*
*
* DN has disk errors but still has valid volumes
*
*/
DISK_ERROR(1, 1),
/**
* INVALID_BLOCK = 2;
*
*
* Command from namenode has invalid block ID
*
*/
INVALID_BLOCK(2, 2),
/**
* FATAL_DISK_ERROR = 3;
*
*
* No valid volumes left on datanode
*
*/
FATAL_DISK_ERROR(3, 3),
;
/**
* NOTIFY = 0;
*
*
* Error report to be logged at the namenode
*
*/
public static final int NOTIFY_VALUE = 0;
/**
* DISK_ERROR = 1;
*
*
* DN has disk errors but still has valid volumes
*
*/
public static final int DISK_ERROR_VALUE = 1;
/**
* INVALID_BLOCK = 2;
*
*
* Command from namenode has invalid block ID
*
*/
public static final int INVALID_BLOCK_VALUE = 2;
/**
* FATAL_DISK_ERROR = 3;
*
*
* No valid volumes left on datanode
*
*/
public static final int FATAL_DISK_ERROR_VALUE = 3;
public final int getNumber() { return value; }
public static ErrorCode valueOf(int value) {
switch (value) {
case 0: return NOTIFY;
case 1: return DISK_ERROR;
case 2: return INVALID_BLOCK;
case 3: return FATAL_DISK_ERROR;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<ErrorCode>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<ErrorCode>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<ErrorCode>() {
public ErrorCode findValueByNumber(int number) {
return ErrorCode.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.getDescriptor().getEnumTypes().get(0);
}
private static final ErrorCode[] VALUES = values();
public static ErrorCode valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private ErrorCode(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.datanode.ErrorReportRequestProto.ErrorCode)
}
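// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical usage): the errorCode field of
// ErrorReportRequestProto is a plain uint32, so a natural pattern is to pass
// ErrorCode.getNumber() when building the request and map it back with
// ErrorCode.valueOf(int) when reading it. The variable reg and the message
// text are placeholders; note that the registration field really is spelled
// "registartion" in DatanodeProtocol.proto, which is why the generated
// accessors carry that spelling.
//
//   DatanodeProtocolProtos.ErrorReportRequestProto report =
//       DatanodeProtocolProtos.ErrorReportRequestProto.newBuilder()
//           .setRegistartion(reg)                               // required field 1
//           .setErrorCode(ErrorCode.DISK_ERROR.getNumber())     // required field 2
//           .setMsg("disk error on one storage directory")      // required field 3
//           .build();
//   ErrorCode code = ErrorCode.valueOf(report.getErrorCode());  // DISK_ERROR
// ---------------------------------------------------------------------------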
private int bitField0_;
// required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;
public static final int REGISTARTION_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registartion_;
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;
*
*
* Registartion info
*
*/
public boolean hasRegistartion() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;
*
*
* Registartion info
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistartion() {
return registartion_;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;
*
*
* Registartion info
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistartionOrBuilder() {
return registartion_;
}
// required uint32 errorCode = 2;
public static final int ERRORCODE_FIELD_NUMBER = 2;
private int errorCode_;
/**
* required uint32 errorCode = 2;
*
*
* Error code
*
*/
public boolean hasErrorCode() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint32 errorCode = 2;
*
*
* Error code
*
*/
public int getErrorCode() {
return errorCode_;
}
// required string msg = 3;
public static final int MSG_FIELD_NUMBER = 3;
private java.lang.Object msg_;
/**
* required string msg = 3;
*
*
* Error message
*
*/
public boolean hasMsg() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required string msg = 3;
*
*
* Error message
*
*/
public java.lang.String getMsg() {
java.lang.Object ref = msg_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
msg_ = s;
}
return s;
}
}
/**
* required string msg = 3;
*
*
* Error message
*
*/
public com.google.protobuf.ByteString
getMsgBytes() {
java.lang.Object ref = msg_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
msg_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
registartion_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
errorCode_ = 0;
msg_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasRegistartion()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasErrorCode()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasMsg()) {
memoizedIsInitialized = 0;
return false;
}
if (!getRegistartion().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, registartion_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt32(2, errorCode_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, getMsgBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, registartion_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(2, errorCode_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, getMsgBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto) obj;
boolean result = true;
result = result && (hasRegistartion() == other.hasRegistartion());
if (hasRegistartion()) {
result = result && getRegistartion()
.equals(other.getRegistartion());
}
result = result && (hasErrorCode() == other.hasErrorCode());
if (hasErrorCode()) {
result = result && (getErrorCode()
== other.getErrorCode());
}
result = result && (hasMsg() == other.hasMsg());
if (hasMsg()) {
result = result && getMsg()
.equals(other.getMsg());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasRegistartion()) {
hash = (37 * hash) + REGISTARTION_FIELD_NUMBER;
hash = (53 * hash) + getRegistartion().hashCode();
}
if (hasErrorCode()) {
hash = (37 * hash) + ERRORCODE_FIELD_NUMBER;
hash = (53 * hash) + getErrorCode();
}
if (hasMsg()) {
hash = (37 * hash) + MSG_FIELD_NUMBER;
hash = (53 * hash) + getMsg().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.ErrorReportRequestProto}
*
*
**
* registartion - Datanode reporting the error
* errorCode - error code indicating the error
* msg - Free text description of the error
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ErrorReportRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ErrorReportRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getRegistartionFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (registartionBuilder_ == null) {
registartion_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
} else {
registartionBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
errorCode_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
msg_ = "";
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ErrorReportRequestProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (registartionBuilder_ == null) {
result.registartion_ = registartion_;
} else {
result.registartion_ = registartionBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.errorCode_ = errorCode_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.msg_ = msg_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance()) return this;
if (other.hasRegistartion()) {
mergeRegistartion(other.getRegistartion());
}
if (other.hasErrorCode()) {
setErrorCode(other.getErrorCode());
}
if (other.hasMsg()) {
bitField0_ |= 0x00000004;
msg_ = other.msg_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasRegistartion()) {
return false;
}
if (!hasErrorCode()) {
return false;
}
if (!hasMsg()) {
return false;
}
if (!getRegistartion().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registartion_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registartionBuilder_;
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;
*
*
* Registartion info
*
*/
public boolean hasRegistartion() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;
*
*
* Registartion info
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistartion() {
if (registartionBuilder_ == null) {
return registartion_;
} else {
return registartionBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;
*
*
* Registartion info
*
*/
public Builder setRegistartion(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
if (registartionBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
registartion_ = value;
onChanged();
} else {
registartionBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;
*
*
* Registartion info
*
*/
public Builder setRegistartion(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) {
if (registartionBuilder_ == null) {
registartion_ = builderForValue.build();
onChanged();
} else {
registartionBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;
*
*
* Registration info
*
*/
public Builder mergeRegistartion(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
if (registartionBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
registartion_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) {
registartion_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(registartion_).mergeFrom(value).buildPartial();
} else {
registartion_ = value;
}
onChanged();
} else {
registartionBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;
*
*
* Registration info
*
*/
public Builder clearRegistartion() {
if (registartionBuilder_ == null) {
registartion_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
onChanged();
} else {
registartionBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;
*
*
* Registration info
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistartionBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getRegistartionFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;
*
*
* Registration info
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistartionOrBuilder() {
if (registartionBuilder_ != null) {
return registartionBuilder_.getMessageOrBuilder();
} else {
return registartion_;
}
}
/**
* required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;
*
*
* Registration info
*
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>
getRegistartionFieldBuilder() {
if (registartionBuilder_ == null) {
registartionBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>(
registartion_,
getParentForChildren(),
isClean());
registartion_ = null;
}
return registartionBuilder_;
}
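// Descriptive note, not emitted by protoc: the builder keeps the registartion field as a plain
// message in registartion_ until getRegistartionBuilder() is first called. At that point
// getRegistartionFieldBuilder() wraps the current value in a SingleFieldBuilder, nulls out
// registartion_, and all later reads and writes go through the nested builder so changes made
// on the child builder propagate back to this builder via onChanged().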
// required uint32 errorCode = 2;
private int errorCode_ ;
/**
* required uint32 errorCode = 2;
*
*
* Error code
*
*/
public boolean hasErrorCode() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint32 errorCode = 2;
*
*
* Error code
*
*/
public int getErrorCode() {
return errorCode_;
}
/**
* required uint32 errorCode = 2;
*
*
* Error code
*
*/
public Builder setErrorCode(int value) {
bitField0_ |= 0x00000002;
errorCode_ = value;
onChanged();
return this;
}
/**
* required uint32 errorCode = 2;
*
*
* Error code
*
*/
public Builder clearErrorCode() {
bitField0_ = (bitField0_ & ~0x00000002);
errorCode_ = 0;
onChanged();
return this;
}
// required string msg = 3;
private java.lang.Object msg_ = "";
/**
* required string msg = 3;
*
*
* Error message
*
*/
public boolean hasMsg() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required string msg = 3;
*
*
* Error message
*
*/
public java.lang.String getMsg() {
java.lang.Object ref = msg_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
msg_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string msg = 3;
*
*
* Error message
*
*/
public com.google.protobuf.ByteString
getMsgBytes() {
java.lang.Object ref = msg_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
msg_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string msg = 3;
*
*
* Error message
*
*/
public Builder setMsg(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
msg_ = value;
onChanged();
return this;
}
/**
* required string msg = 3;
*
*
* Error message
*
*/
public Builder clearMsg() {
bitField0_ = (bitField0_ & ~0x00000004);
msg_ = getDefaultInstance().getMsg();
onChanged();
return this;
}
/**
* required string msg = 3;
*
*
* Error message
*
*/
public Builder setMsgBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
msg_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.ErrorReportRequestProto)
}
static {
defaultInstance = new ErrorReportRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.ErrorReportRequestProto)
}
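// Illustrative sketch, not emitted by protoc: one way a datanode-side caller might assemble an
// ErrorReportRequestProto with the builder defined above. The registration and error code
// arguments are placeholders supplied by the caller; all three required fields (registartion,
// errorCode, msg) must be set, and the registration itself must be fully initialized, or
// build() throws an UninitializedMessageException.
private static ErrorReportRequestProto buildErrorReport(
    DatanodeRegistrationProto registration, int errorCode, String message) {
  return ErrorReportRequestProto.newBuilder()
      .setRegistartion(registration) // setter name keeps the original proto field spelling
      .setErrorCode(errorCode)
      .setMsg(message)
      .build();
}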
public interface ErrorReportResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.ErrorReportResponseProto}
*
*
**
* void response
*
*/
public static final class ErrorReportResponseProto extends
com.google.protobuf.GeneratedMessage
implements ErrorReportResponseProtoOrBuilder {
// Use ErrorReportResponseProto.newBuilder() to construct.
private ErrorReportResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ErrorReportResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ErrorReportResponseProto defaultInstance;
public static ErrorReportResponseProto getDefaultInstance() {
return defaultInstance;
}
public ErrorReportResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ErrorReportResponseProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ErrorReportResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ErrorReportResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.Builder.class);
}
public static com.google.protobuf.Parser<ErrorReportResponseProto> PARSER =
new com.google.protobuf.AbstractParser<ErrorReportResponseProto>() {
public ErrorReportResponseProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ErrorReportResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<ErrorReportResponseProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.datanode.ErrorReportResponseProto}
*
*
**
* void response
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ErrorReportResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ErrorReportResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ErrorReportResponseProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.ErrorReportResponseProto)
}
static {
defaultInstance = new ErrorReportResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.ErrorReportResponseProto)
}
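// Illustrative sketch, not emitted by protoc: ErrorReportResponseProto carries no fields, so a
// client typically only parses it to confirm the RPC completed. The byte[] argument is assumed
// to come from the RPC layer; parseFrom() is the generated static method declared above.
private static ErrorReportResponseProto parseErrorReportResponse(byte[] responseBytes)
    throws com.google.protobuf.InvalidProtocolBufferException {
  // Any unrecognized fields are preserved in the message's UnknownFieldSet rather than failing.
  return ErrorReportResponseProto.parseFrom(responseBytes);
}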
public interface ReportBadBlocksRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;
/**
*