// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: DatanodeProtocol.proto
package org.apache.hadoop.hdfs.protocol.proto;
public final class DatanodeProtocolProtos {
private DatanodeProtocolProtos() {}
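// No-op by design: DatanodeProtocol.proto declares no protobuf extensions,
// so there is nothing to add to the registry.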
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
}
public interface DatanodeRegistrationProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .DatanodeIDProto datanodeID = 1;
boolean hasDatanodeID();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodeID();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeIDOrBuilder();
// required .StorageInfoProto storageInfo = 2;
boolean hasStorageInfo();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder();
// required .ExportedBlockKeysProto keys = 3;
boolean hasKeys();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto getKeys();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder();
// required string softwareVersion = 4;
boolean hasSoftwareVersion();
String getSoftwareVersion();
}
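// The generated *OrBuilder interface is a read-only view of the message:
// both the immutable DatanodeRegistrationProto and its mutable Builder
// implement it, so callers can accept either without forcing a build().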
public static final class DatanodeRegistrationProto extends
com.google.protobuf.GeneratedMessage
implements DatanodeRegistrationProtoOrBuilder {
// Use DatanodeRegistrationProto.newBuilder() to construct.
private DatanodeRegistrationProto(Builder builder) {
super(builder);
}
private DatanodeRegistrationProto(boolean noInit) {}
private static final DatanodeRegistrationProto defaultInstance;
public static DatanodeRegistrationProto getDefaultInstance() {
return defaultInstance;
}
public DatanodeRegistrationProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_DatanodeRegistrationProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_DatanodeRegistrationProto_fieldAccessorTable;
}
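// bitField0_ holds one presence bit per field (0x1 datanodeID, 0x2 storageInfo,
// 0x4 keys, 0x8 softwareVersion); the has*() accessors below just test the bit.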
private int bitField0_;
// required .DatanodeIDProto datanodeID = 1;
public static final int DATANODEID_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto datanodeID_;
public boolean hasDatanodeID() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodeID() {
return datanodeID_;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeIDOrBuilder() {
return datanodeID_;
}
// required .StorageInfoProto storageInfo = 2;
public static final int STORAGEINFO_FIELD_NUMBER = 2;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storageInfo_;
public boolean hasStorageInfo() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo() {
return storageInfo_;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() {
return storageInfo_;
}
// required .ExportedBlockKeysProto keys = 3;
public static final int KEYS_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto keys_;
public boolean hasKeys() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto getKeys() {
return keys_;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder() {
return keys_;
}
// required string softwareVersion = 4;
public static final int SOFTWAREVERSION_FIELD_NUMBER = 4;
private java.lang.Object softwareVersion_;
public boolean hasSoftwareVersion() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
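// Lazy UTF-8 decode: after parsing, the field holds a ByteString; it is
// converted to a String on first access and cached, but only when the bytes
// are valid UTF-8.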
public String getSoftwareVersion() {
java.lang.Object ref = softwareVersion_;
if (ref instanceof String) {
return (String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
String s = bs.toStringUtf8();
if (com.google.protobuf.Internal.isValidUtf8(bs)) {
softwareVersion_ = s;
}
return s;
}
}
private com.google.protobuf.ByteString getSoftwareVersionBytes() {
java.lang.Object ref = softwareVersion_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8((String) ref);
softwareVersion_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
datanodeID_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance();
storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance();
softwareVersion_ = "";
}
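// isInitialized() is memoized: -1 means "not checked yet", 0 false, 1 true.
// All four required fields must be present, and the message-typed ones must
// themselves be recursively initialized.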
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasDatanodeID()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasStorageInfo()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasKeys()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasSoftwareVersion()) {
memoizedIsInitialized = 0;
return false;
}
if (!getDatanodeID().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
if (!getStorageInfo().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
if (!getKeys().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
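// writeTo() calls getSerializedSize() first so that nested message sizes are
// memoized before the length-delimited fields are written; getSerializedSize()
// itself caches its result in memoizedSerializedSize.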
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, datanodeID_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeMessage(2, storageInfo_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeMessage(3, keys_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeBytes(4, getSoftwareVersionBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, datanodeID_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, storageInfo_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, keys_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(4, getSoftwareVersionBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
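// equals()/hashCode() compare field-by-field, gated on presence; hashCode()
// mixes each present field's number and value into the running hash.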
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto) obj;
boolean result = true;
result = result && (hasDatanodeID() == other.hasDatanodeID());
if (hasDatanodeID()) {
result = result && getDatanodeID()
.equals(other.getDatanodeID());
}
result = result && (hasStorageInfo() == other.hasStorageInfo());
if (hasStorageInfo()) {
result = result && getStorageInfo()
.equals(other.getStorageInfo());
}
result = result && (hasKeys() == other.hasKeys());
if (hasKeys()) {
result = result && getKeys()
.equals(other.getKeys());
}
result = result && (hasSoftwareVersion() == other.hasSoftwareVersion());
if (hasSoftwareVersion()) {
result = result && getSoftwareVersion()
.equals(other.getSoftwareVersion());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasDatanodeID()) {
hash = (37 * hash) + DATANODEID_FIELD_NUMBER;
hash = (53 * hash) + getDatanodeID().hashCode();
}
if (hasStorageInfo()) {
hash = (37 * hash) + STORAGEINFO_FIELD_NUMBER;
hash = (53 * hash) + getStorageInfo().hashCode();
}
if (hasKeys()) {
hash = (37 * hash) + KEYS_FIELD_NUMBER;
hash = (53 * hash) + getKeys().hashCode();
}
if (hasSoftwareVersion()) {
hash = (37 * hash) + SOFTWAREVERSION_FIELD_NUMBER;
hash = (53 * hash) + getSoftwareVersion().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
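// Every parseFrom() variant funnels through a Builder; buildParsed() turns a
// missing-required-field failure into an InvalidProtocolBufferException. The
// parseDelimitedFrom() variants return null on a clean end-of-stream.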
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
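// Illustrative builder usage (a sketch; datanodeId, storageInfo and keys are
// assumed to be fully initialized messages built elsewhere):
//
//   DatanodeRegistrationProto reg = DatanodeRegistrationProto.newBuilder()
//       .setDatanodeID(datanodeId)
//       .setStorageInfo(storageInfo)
//       .setKeys(keys)
//       .setSoftwareVersion("2.0.0")   // hypothetical version string
//       .build();                      // throws if a required field is unset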
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_DatanodeRegistrationProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_DatanodeRegistrationProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
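// alwaysUseFieldBuilders is a GeneratedMessage flag (normally false, enabled
// for testing); when set, the nested single-field builders are created eagerly
// here instead of lazily in the get*FieldBuilder() methods below.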
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getDatanodeIDFieldBuilder();
getStorageInfoFieldBuilder();
getKeysFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (datanodeIDBuilder_ == null) {
datanodeID_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance();
} else {
datanodeIDBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
if (storageInfoBuilder_ == null) {
storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
} else {
storageInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
if (keysBuilder_ == null) {
keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance();
} else {
keysBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
softwareVersion_ = "";
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (datanodeIDBuilder_ == null) {
result.datanodeID_ = datanodeID_;
} else {
result.datanodeID_ = datanodeIDBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
if (storageInfoBuilder_ == null) {
result.storageInfo_ = storageInfo_;
} else {
result.storageInfo_ = storageInfoBuilder_.build();
}
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
if (keysBuilder_ == null) {
result.keys_ = keys_;
} else {
result.keys_ = keysBuilder_.build();
}
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.softwareVersion_ = softwareVersion_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) return this;
if (other.hasDatanodeID()) {
mergeDatanodeID(other.getDatanodeID());
}
if (other.hasStorageInfo()) {
mergeStorageInfo(other.getStorageInfo());
}
if (other.hasKeys()) {
mergeKeys(other.getKeys());
}
if (other.hasSoftwareVersion()) {
setSoftwareVersion(other.getSoftwareVersion());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasDatanodeID()) {
return false;
}
if (!hasStorageInfo()) {
return false;
}
if (!hasKeys()) {
return false;
}
if (!hasSoftwareVersion()) {
return false;
}
if (!getDatanodeID().isInitialized()) {
return false;
}
if (!getStorageInfo().isInitialized()) {
return false;
}
if (!getKeys().isInitialized()) {
return false;
}
return true;
}
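// Hand-rolled tag dispatch: each case value is (field_number << 3) | wire_type,
// so 10, 18 and 26 are the three message fields (wire type 2) and 34 is the
// softwareVersion bytes. Tag 0 means end of input; unrecognized fields are
// preserved in unknownFields.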
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder();
if (hasDatanodeID()) {
subBuilder.mergeFrom(getDatanodeID());
}
input.readMessage(subBuilder, extensionRegistry);
setDatanodeID(subBuilder.buildPartial());
break;
}
case 18: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder();
if (hasStorageInfo()) {
subBuilder.mergeFrom(getStorageInfo());
}
input.readMessage(subBuilder, extensionRegistry);
setStorageInfo(subBuilder.buildPartial());
break;
}
case 26: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.newBuilder();
if (hasKeys()) {
subBuilder.mergeFrom(getKeys());
}
input.readMessage(subBuilder, extensionRegistry);
setKeys(subBuilder.buildPartial());
break;
}
case 34: {
bitField0_ |= 0x00000008;
softwareVersion_ = input.readBytes();
break;
}
}
}
}
private int bitField0_;
// required .DatanodeIDProto datanodeID = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto datanodeID_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> datanodeIDBuilder_;
public boolean hasDatanodeID() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodeID() {
if (datanodeIDBuilder_ == null) {
return datanodeID_;
} else {
return datanodeIDBuilder_.getMessage();
}
}
public Builder setDatanodeID(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
if (datanodeIDBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
datanodeID_ = value;
onChanged();
} else {
datanodeIDBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder setDatanodeID(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
if (datanodeIDBuilder_ == null) {
datanodeID_ = builderForValue.build();
onChanged();
} else {
datanodeIDBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
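// Merge semantics for a message field: if datanodeID is already set to a
// non-default value, the incoming value is merged into it field-by-field;
// otherwise it simply replaces the current value.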
public Builder mergeDatanodeID(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
if (datanodeIDBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
datanodeID_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()) {
datanodeID_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder(datanodeID_).mergeFrom(value).buildPartial();
} else {
datanodeID_ = value;
}
onChanged();
} else {
datanodeIDBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder clearDatanodeID() {
if (datanodeIDBuilder_ == null) {
datanodeID_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance();
onChanged();
} else {
datanodeIDBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder getDatanodeIDBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getDatanodeIDFieldBuilder().getBuilder();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeIDOrBuilder() {
if (datanodeIDBuilder_ != null) {
return datanodeIDBuilder_.getMessageOrBuilder();
} else {
return datanodeID_;
}
}
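// The SingleFieldBuilder is created lazily; once it exists it owns the value,
// so the plain datanodeID_ reference is nulled out.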
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>
getDatanodeIDFieldBuilder() {
if (datanodeIDBuilder_ == null) {
datanodeIDBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>(
datanodeID_,
getParentForChildren(),
isClean());
datanodeID_ = null;
}
return datanodeIDBuilder_;
}
// required .StorageInfoProto storageInfo = 2;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder> storageInfoBuilder_;
public boolean hasStorageInfo() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo() {
if (storageInfoBuilder_ == null) {
return storageInfo_;
} else {
return storageInfoBuilder_.getMessage();
}
}
public Builder setStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) {
if (storageInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
storageInfo_ = value;
onChanged();
} else {
storageInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
return this;
}
public Builder setStorageInfo(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder builderForValue) {
if (storageInfoBuilder_ == null) {
storageInfo_ = builderForValue.build();
onChanged();
} else {
storageInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
return this;
}
public Builder mergeStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) {
if (storageInfoBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002) &&
storageInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance()) {
storageInfo_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder(storageInfo_).mergeFrom(value).buildPartial();
} else {
storageInfo_ = value;
}
onChanged();
} else {
storageInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
return this;
}
public Builder clearStorageInfo() {
if (storageInfoBuilder_ == null) {
storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
onChanged();
} else {
storageInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder getStorageInfoBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getStorageInfoFieldBuilder().getBuilder();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() {
if (storageInfoBuilder_ != null) {
return storageInfoBuilder_.getMessageOrBuilder();
} else {
return storageInfo_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder>
getStorageInfoFieldBuilder() {
if (storageInfoBuilder_ == null) {
storageInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder>(
storageInfo_,
getParentForChildren(),
isClean());
storageInfo_ = null;
}
return storageInfoBuilder_;
}
// required .ExportedBlockKeysProto keys = 3;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder> keysBuilder_;
public boolean hasKeys() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto getKeys() {
if (keysBuilder_ == null) {
return keys_;
} else {
return keysBuilder_.getMessage();
}
}
public Builder setKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto value) {
if (keysBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
keys_ = value;
onChanged();
} else {
keysBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
return this;
}
public Builder setKeys(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder builderForValue) {
if (keysBuilder_ == null) {
keys_ = builderForValue.build();
onChanged();
} else {
keysBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
return this;
}
public Builder mergeKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto value) {
if (keysBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004) &&
keys_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance()) {
keys_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.newBuilder(keys_).mergeFrom(value).buildPartial();
} else {
keys_ = value;
}
onChanged();
} else {
keysBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
return this;
}
public Builder clearKeys() {
if (keysBuilder_ == null) {
keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance();
onChanged();
} else {
keysBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder getKeysBuilder() {
bitField0_ |= 0x00000004;
onChanged();
return getKeysFieldBuilder().getBuilder();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder() {
if (keysBuilder_ != null) {
return keysBuilder_.getMessageOrBuilder();
} else {
return keys_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder>
getKeysFieldBuilder() {
if (keysBuilder_ == null) {
keysBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder>(
keys_,
getParentForChildren(),
isClean());
keys_ = null;
}
return keysBuilder_;
}
// required string softwareVersion = 4;
private java.lang.Object softwareVersion_ = "";
public boolean hasSoftwareVersion() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
public String getSoftwareVersion() {
java.lang.Object ref = softwareVersion_;
if (!(ref instanceof String)) {
String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
softwareVersion_ = s;
return s;
} else {
return (String) ref;
}
}
public Builder setSoftwareVersion(String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
softwareVersion_ = value;
onChanged();
return this;
}
public Builder clearSoftwareVersion() {
bitField0_ = (bitField0_ & ~0x00000008);
softwareVersion_ = getDefaultInstance().getSoftwareVersion();
onChanged();
return this;
}
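// Package-private ByteString setter used by the parsing path (case 34 above)
// to store the raw bytes and defer UTF-8 decoding until getSoftwareVersion().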
void setSoftwareVersion(com.google.protobuf.ByteString value) {
bitField0_ |= 0x00000008;
softwareVersion_ = value;
onChanged();
}
// @@protoc_insertion_point(builder_scope:DatanodeRegistrationProto)
}
static {
defaultInstance = new DatanodeRegistrationProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:DatanodeRegistrationProto)
}
public interface DatanodeStorageProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required string storageID = 1;
boolean hasStorageID();
String getStorageID();
// optional .DatanodeStorageProto.StorageState state = 2 [default = NORMAL];
boolean hasState();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.StorageState getState();
}
public static final class DatanodeStorageProto extends
com.google.protobuf.GeneratedMessage
implements DatanodeStorageProtoOrBuilder {
// Use DatanodeStorageProto.newBuilder() to construct.
private DatanodeStorageProto(Builder builder) {
super(builder);
}
private DatanodeStorageProto(boolean noInit) {}
private static final DatanodeStorageProto defaultInstance;
public static DatanodeStorageProto getDefaultInstance() {
return defaultInstance;
}
public DatanodeStorageProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_DatanodeStorageProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_DatanodeStorageProto_fieldAccessorTable;
}
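// Each enum constant carries (descriptor index, wire value). valueOf(int)
// returns null for numbers this build does not know, which lets the parser
// route them into the message's unknown fields instead of failing.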
public enum StorageState
implements com.google.protobuf.ProtocolMessageEnum {
NORMAL(0, 0),
READ_ONLY(1, 1),
;
public static final int NORMAL_VALUE = 0;
public static final int READ_ONLY_VALUE = 1;
public final int getNumber() { return value; }
public static StorageState valueOf(int value) {
switch (value) {
case 0: return NORMAL;
case 1: return READ_ONLY;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<StorageState>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<StorageState>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<StorageState>() {
public StorageState findValueByNumber(int number) {
return StorageState.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.getDescriptor().getEnumTypes().get(0);
}
private static final StorageState[] VALUES = {
NORMAL, READ_ONLY,
};
public static StorageState valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private StorageState(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:DatanodeStorageProto.StorageState)
}
private int bitField0_;
// required string storageID = 1;
public static final int STORAGEID_FIELD_NUMBER = 1;
private java.lang.Object storageID_;
public boolean hasStorageID() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public String getStorageID() {
java.lang.Object ref = storageID_;
if (ref instanceof String) {
return (String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
String s = bs.toStringUtf8();
if (com.google.protobuf.Internal.isValidUtf8(bs)) {
storageID_ = s;
}
return s;
}
}
private com.google.protobuf.ByteString getStorageIDBytes() {
java.lang.Object ref = storageID_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8((String) ref);
storageID_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// optional .DatanodeStorageProto.StorageState state = 2 [default = NORMAL];
public static final int STATE_FIELD_NUMBER = 2;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.StorageState state_;
public boolean hasState() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.StorageState getState() {
return state_;
}
private void initFields() {
storageID_ = "";
state_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.StorageState.NORMAL;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasStorageID()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getStorageIDBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeEnum(2, state_.getNumber());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getStorageIDBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(2, state_.getNumber());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto) obj;
boolean result = true;
result = result && (hasStorageID() == other.hasStorageID());
if (hasStorageID()) {
result = result && getStorageID()
.equals(other.getStorageID());
}
result = result && (hasState() == other.hasState());
if (hasState()) {
result = result &&
(getState() == other.getState());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasStorageID()) {
hash = (37 * hash) + STORAGEID_FIELD_NUMBER;
hash = (53 * hash) + getStorageID().hashCode();
}
if (hasState()) {
hash = (37 * hash) + STATE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getState());
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_DatanodeStorageProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_DatanodeStorageProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
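// Nothing to pre-create here: DatanodeStorageProto has no message-typed
// fields, so the builder keeps no nested field builders.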
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
storageID_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
state_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.StorageState.NORMAL;
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.storageID_ = storageID_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.state_ = state_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.getDefaultInstance()) return this;
if (other.hasStorageID()) {
setStorageID(other.getStorageID());
}
if (other.hasState()) {
setState(other.getState());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasStorageID()) {
return false;
}
return true;
}
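// Tag 10 is storageID (field 1, length-delimited); tag 16 is state (field 2,
// varint). An enum number with no matching StorageState constant is kept as a
// varint in unknownFields rather than being silently dropped.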
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
storageID_ = input.readBytes();
break;
}
case 16: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.StorageState value = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.StorageState.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(2, rawValue);
} else {
bitField0_ |= 0x00000002;
state_ = value;
}
break;
}
}
}
}
private int bitField0_;
// required string storageID = 1;
private java.lang.Object storageID_ = "";
public boolean hasStorageID() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public String getStorageID() {
java.lang.Object ref = storageID_;
if (!(ref instanceof String)) {
String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
storageID_ = s;
return s;
} else {
return (String) ref;
}
}
public Builder setStorageID(String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
storageID_ = value;
onChanged();
return this;
}
public Builder clearStorageID() {
bitField0_ = (bitField0_ & ~0x00000001);
storageID_ = getDefaultInstance().getStorageID();
onChanged();
return this;
}
void setStorageID(com.google.protobuf.ByteString value) {
bitField0_ |= 0x00000001;
storageID_ = value;
onChanged();
}
// optional .DatanodeStorageProto.StorageState state = 2 [default = NORMAL];
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.StorageState state_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.StorageState.NORMAL;
public boolean hasState() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.StorageState getState() {
return state_;
}
public Builder setState(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.StorageState value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
state_ = value;
onChanged();
return this;
}
public Builder clearState() {
bitField0_ = (bitField0_ & ~0x00000002);
state_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.StorageState.NORMAL;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:DatanodeStorageProto)
}
static {
defaultInstance = new DatanodeStorageProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:DatanodeStorageProto)
}
public interface DatanodeCommandProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .DatanodeCommandProto.Type cmdType = 1;
boolean hasCmdType();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type getCmdType();
// optional .BalancerBandwidthCommandProto balancerCmd = 2;
boolean hasBalancerCmd();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto getBalancerCmd();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder getBalancerCmdOrBuilder();
// optional .BlockCommandProto blkCmd = 3;
boolean hasBlkCmd();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto getBlkCmd();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder getBlkCmdOrBuilder();
// optional .BlockRecoveryCommandProto recoveryCmd = 4;
boolean hasRecoveryCmd();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto getRecoveryCmd();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProtoOrBuilder getRecoveryCmdOrBuilder();
// optional .FinalizeCommandProto finalizeCmd = 5;
boolean hasFinalizeCmd();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto getFinalizeCmd();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder getFinalizeCmdOrBuilder();
// optional .KeyUpdateCommandProto keyUpdateCmd = 6;
boolean hasKeyUpdateCmd();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto getKeyUpdateCmd();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder getKeyUpdateCmdOrBuilder();
// optional .RegisterCommandProto registerCmd = 7;
boolean hasRegisterCmd();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto getRegisterCmd();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder getRegisterCmdOrBuilder();
// optional .UpgradeCommandProto upgradeCmd = 8;
boolean hasUpgradeCmd();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getUpgradeCmd();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder getUpgradeCmdOrBuilder();
}
public static final class DatanodeCommandProto extends
com.google.protobuf.GeneratedMessage
implements DatanodeCommandProtoOrBuilder {
// Use DatanodeCommandProto.newBuilder() to construct.
private DatanodeCommandProto(Builder builder) {
super(builder);
}
private DatanodeCommandProto(boolean noInit) {}
private static final DatanodeCommandProto defaultInstance;
public static DatanodeCommandProto getDefaultInstance() {
return defaultInstance;
}
public DatanodeCommandProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_DatanodeCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_DatanodeCommandProto_fieldAccessorTable;
}
public enum Type
implements com.google.protobuf.ProtocolMessageEnum {
BalancerBandwidthCommand(0, 0),
BlockCommand(1, 1),
BlockRecoveryCommand(2, 2),
FinalizeCommand(3, 3),
KeyUpdateCommand(4, 4),
RegisterCommand(5, 5),
UpgradeCommand(6, 6),
NullDatanodeCommand(7, 7),
;
public static final int BalancerBandwidthCommand_VALUE = 0;
public static final int BlockCommand_VALUE = 1;
public static final int BlockRecoveryCommand_VALUE = 2;
public static final int FinalizeCommand_VALUE = 3;
public static final int KeyUpdateCommand_VALUE = 4;
public static final int RegisterCommand_VALUE = 5;
public static final int UpgradeCommand_VALUE = 6;
public static final int NullDatanodeCommand_VALUE = 7;
public final int getNumber() { return value; }
public static Type valueOf(int value) {
switch (value) {
case 0: return BalancerBandwidthCommand;
case 1: return BlockCommand;
case 2: return BlockRecoveryCommand;
case 3: return FinalizeCommand;
case 4: return KeyUpdateCommand;
case 5: return RegisterCommand;
case 6: return UpgradeCommand;
case 7: return NullDatanodeCommand;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<Type>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<Type>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<Type>() {
public Type findValueByNumber(int number) {
return Type.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDescriptor().getEnumTypes().get(0);
}
private static final Type[] VALUES = {
BalancerBandwidthCommand, BlockCommand, BlockRecoveryCommand, FinalizeCommand, KeyUpdateCommand, RegisterCommand, UpgradeCommand, NullDatanodeCommand,
};
public static Type valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private Type(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:DatanodeCommandProto.Type)
}
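// DatanodeCommandProto acts as a manual tagged union: cmdType selects which of
// the optional per-command fields (balancerCmd, blkCmd, recoveryCmd, ...)
// carries the payload for this command.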
private int bitField0_;
// required .DatanodeCommandProto.Type cmdType = 1;
public static final int CMDTYPE_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type cmdType_;
public boolean hasCmdType() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type getCmdType() {
return cmdType_;
}
// optional .BalancerBandwidthCommandProto balancerCmd = 2;
public static final int BALANCERCMD_FIELD_NUMBER = 2;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto balancerCmd_;
public boolean hasBalancerCmd() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto getBalancerCmd() {
return balancerCmd_;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder getBalancerCmdOrBuilder() {
return balancerCmd_;
}
// optional .BlockCommandProto blkCmd = 3;
public static final int BLKCMD_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto blkCmd_;
public boolean hasBlkCmd() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto getBlkCmd() {
return blkCmd_;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder getBlkCmdOrBuilder() {
return blkCmd_;
}
// optional .BlockRecoveryCommandProto recoveryCmd = 4;
public static final int RECOVERYCMD_FIELD_NUMBER = 4;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto recoveryCmd_;
public boolean hasRecoveryCmd() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto getRecoveryCmd() {
return recoveryCmd_;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProtoOrBuilder getRecoveryCmdOrBuilder() {
return recoveryCmd_;
}
// optional .FinalizeCommandProto finalizeCmd = 5;
public static final int FINALIZECMD_FIELD_NUMBER = 5;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto finalizeCmd_;
public boolean hasFinalizeCmd() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto getFinalizeCmd() {
return finalizeCmd_;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder getFinalizeCmdOrBuilder() {
return finalizeCmd_;
}
// optional .KeyUpdateCommandProto keyUpdateCmd = 6;
public static final int KEYUPDATECMD_FIELD_NUMBER = 6;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto keyUpdateCmd_;
public boolean hasKeyUpdateCmd() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto getKeyUpdateCmd() {
return keyUpdateCmd_;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder getKeyUpdateCmdOrBuilder() {
return keyUpdateCmd_;
}
// optional .RegisterCommandProto registerCmd = 7;
public static final int REGISTERCMD_FIELD_NUMBER = 7;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto registerCmd_;
public boolean hasRegisterCmd() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto getRegisterCmd() {
return registerCmd_;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder getRegisterCmdOrBuilder() {
return registerCmd_;
}
// optional .UpgradeCommandProto upgradeCmd = 8;
public static final int UPGRADECMD_FIELD_NUMBER = 8;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto upgradeCmd_;
public boolean hasUpgradeCmd() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getUpgradeCmd() {
return upgradeCmd_;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder getUpgradeCmdOrBuilder() {
return upgradeCmd_;
}
private void initFields() {
cmdType_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.BalancerBandwidthCommand;
balancerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance();
blkCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance();
recoveryCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance();
finalizeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance();
keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
registerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance();
upgradeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasCmdType()) {
memoizedIsInitialized = 0;
return false;
}
if (hasBalancerCmd()) {
if (!getBalancerCmd().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasBlkCmd()) {
if (!getBlkCmd().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasRecoveryCmd()) {
if (!getRecoveryCmd().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasFinalizeCmd()) {
if (!getFinalizeCmd().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasKeyUpdateCmd()) {
if (!getKeyUpdateCmd().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasUpgradeCmd()) {
if (!getUpgradeCmd().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
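// Initialization sketch: cmdType is the only required field, so a command
// carrying just the type is valid; the optional sub-commands are validated
// only when present. For example:
//   DatanodeCommandProto.newBuilder()
//       .setCmdType(DatanodeCommandProto.Type.RegisterCommand)
//       .build();   // succeeds; build() without setCmdType(...) throws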
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, cmdType_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeMessage(2, balancerCmd_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeMessage(3, blkCmd_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeMessage(4, recoveryCmd_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeMessage(5, finalizeCmd_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeMessage(6, keyUpdateCmd_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeMessage(7, registerCmd_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
output.writeMessage(8, upgradeCmd_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, cmdType_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, balancerCmd_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, blkCmd_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(4, recoveryCmd_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(5, finalizeCmd_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(6, keyUpdateCmd_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(7, registerCmd_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(8, upgradeCmd_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
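// Memoization sketch (out is an illustrative CodedOutputStream): the size is
// cached in memoizedSerializedSize, so writeTo(...) calls getSerializedSize()
// up front and every later call is O(1):
//   int n = cmd.getSerializedSize();  // computed once, then memoized
//   cmd.writeTo(out);                 // reuses the cached value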
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto) obj;
boolean result = true;
result = result && (hasCmdType() == other.hasCmdType());
if (hasCmdType()) {
result = result &&
(getCmdType() == other.getCmdType());
}
result = result && (hasBalancerCmd() == other.hasBalancerCmd());
if (hasBalancerCmd()) {
result = result && getBalancerCmd()
.equals(other.getBalancerCmd());
}
result = result && (hasBlkCmd() == other.hasBlkCmd());
if (hasBlkCmd()) {
result = result && getBlkCmd()
.equals(other.getBlkCmd());
}
result = result && (hasRecoveryCmd() == other.hasRecoveryCmd());
if (hasRecoveryCmd()) {
result = result && getRecoveryCmd()
.equals(other.getRecoveryCmd());
}
result = result && (hasFinalizeCmd() == other.hasFinalizeCmd());
if (hasFinalizeCmd()) {
result = result && getFinalizeCmd()
.equals(other.getFinalizeCmd());
}
result = result && (hasKeyUpdateCmd() == other.hasKeyUpdateCmd());
if (hasKeyUpdateCmd()) {
result = result && getKeyUpdateCmd()
.equals(other.getKeyUpdateCmd());
}
result = result && (hasRegisterCmd() == other.hasRegisterCmd());
if (hasRegisterCmd()) {
result = result && getRegisterCmd()
.equals(other.getRegisterCmd());
}
result = result && (hasUpgradeCmd() == other.hasUpgradeCmd());
if (hasUpgradeCmd()) {
result = result && getUpgradeCmd()
.equals(other.getUpgradeCmd());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasCmdType()) {
hash = (37 * hash) + CMDTYPE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getCmdType());
}
if (hasBalancerCmd()) {
hash = (37 * hash) + BALANCERCMD_FIELD_NUMBER;
hash = (53 * hash) + getBalancerCmd().hashCode();
}
if (hasBlkCmd()) {
hash = (37 * hash) + BLKCMD_FIELD_NUMBER;
hash = (53 * hash) + getBlkCmd().hashCode();
}
if (hasRecoveryCmd()) {
hash = (37 * hash) + RECOVERYCMD_FIELD_NUMBER;
hash = (53 * hash) + getRecoveryCmd().hashCode();
}
if (hasFinalizeCmd()) {
hash = (37 * hash) + FINALIZECMD_FIELD_NUMBER;
hash = (53 * hash) + getFinalizeCmd().hashCode();
}
if (hasKeyUpdateCmd()) {
hash = (37 * hash) + KEYUPDATECMD_FIELD_NUMBER;
hash = (53 * hash) + getKeyUpdateCmd().hashCode();
}
if (hasRegisterCmd()) {
hash = (37 * hash) + REGISTERCMD_FIELD_NUMBER;
hash = (53 * hash) + getRegisterCmd().hashCode();
}
if (hasUpgradeCmd()) {
hash = (37 * hash) + UPGRADECMD_FIELD_NUMBER;
hash = (53 * hash) + getUpgradeCmd().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
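// Copy-on-write sketch: toBuilder() routes through newBuilder(this), which
// mergeFrom-copies the message, so an edited copy never mutates the original:
//   DatanodeCommandProto updated = cmd.toBuilder()
//       .setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
//       .build();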
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_DatanodeCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_DatanodeCommandProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getBalancerCmdFieldBuilder();
getBlkCmdFieldBuilder();
getRecoveryCmdFieldBuilder();
getFinalizeCmdFieldBuilder();
getKeyUpdateCmdFieldBuilder();
getRegisterCmdFieldBuilder();
getUpgradeCmdFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
cmdType_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.BalancerBandwidthCommand;
bitField0_ = (bitField0_ & ~0x00000001);
if (balancerCmdBuilder_ == null) {
balancerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance();
} else {
balancerCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
if (blkCmdBuilder_ == null) {
blkCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance();
} else {
blkCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
if (recoveryCmdBuilder_ == null) {
recoveryCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance();
} else {
recoveryCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
if (finalizeCmdBuilder_ == null) {
finalizeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance();
} else {
finalizeCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000010);
if (keyUpdateCmdBuilder_ == null) {
keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
} else {
keyUpdateCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000020);
if (registerCmdBuilder_ == null) {
registerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance();
} else {
registerCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000040);
if (upgradeCmdBuilder_ == null) {
upgradeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance();
} else {
upgradeCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000080);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.cmdType_ = cmdType_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
if (balancerCmdBuilder_ == null) {
result.balancerCmd_ = balancerCmd_;
} else {
result.balancerCmd_ = balancerCmdBuilder_.build();
}
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
if (blkCmdBuilder_ == null) {
result.blkCmd_ = blkCmd_;
} else {
result.blkCmd_ = blkCmdBuilder_.build();
}
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
if (recoveryCmdBuilder_ == null) {
result.recoveryCmd_ = recoveryCmd_;
} else {
result.recoveryCmd_ = recoveryCmdBuilder_.build();
}
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
if (finalizeCmdBuilder_ == null) {
result.finalizeCmd_ = finalizeCmd_;
} else {
result.finalizeCmd_ = finalizeCmdBuilder_.build();
}
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000020;
}
if (keyUpdateCmdBuilder_ == null) {
result.keyUpdateCmd_ = keyUpdateCmd_;
} else {
result.keyUpdateCmd_ = keyUpdateCmdBuilder_.build();
}
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000040;
}
if (registerCmdBuilder_ == null) {
result.registerCmd_ = registerCmd_;
} else {
result.registerCmd_ = registerCmdBuilder_.build();
}
if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
to_bitField0_ |= 0x00000080;
}
if (upgradeCmdBuilder_ == null) {
result.upgradeCmd_ = upgradeCmd_;
} else {
result.upgradeCmd_ = upgradeCmdBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance()) return this;
if (other.hasCmdType()) {
setCmdType(other.getCmdType());
}
if (other.hasBalancerCmd()) {
mergeBalancerCmd(other.getBalancerCmd());
}
if (other.hasBlkCmd()) {
mergeBlkCmd(other.getBlkCmd());
}
if (other.hasRecoveryCmd()) {
mergeRecoveryCmd(other.getRecoveryCmd());
}
if (other.hasFinalizeCmd()) {
mergeFinalizeCmd(other.getFinalizeCmd());
}
if (other.hasKeyUpdateCmd()) {
mergeKeyUpdateCmd(other.getKeyUpdateCmd());
}
if (other.hasRegisterCmd()) {
mergeRegisterCmd(other.getRegisterCmd());
}
if (other.hasUpgradeCmd()) {
mergeUpgradeCmd(other.getUpgradeCmd());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
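// Merge sketch (base and overlay are illustrative): fields present in the
// argument win, and a sub-command already set on this builder is combined
// field-by-field with the incoming one via mergeBalancerCmd(...) and
// friends, not overwritten wholesale:
//   DatanodeCommandProto merged =
//       DatanodeCommandProto.newBuilder(base).mergeFrom(overlay).build();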
public final boolean isInitialized() {
if (!hasCmdType()) {
return false;
}
if (hasBalancerCmd()) {
if (!getBalancerCmd().isInitialized()) {
return false;
}
}
if (hasBlkCmd()) {
if (!getBlkCmd().isInitialized()) {
return false;
}
}
if (hasRecoveryCmd()) {
if (!getRecoveryCmd().isInitialized()) {
return false;
}
}
if (hasFinalizeCmd()) {
if (!getFinalizeCmd().isInitialized()) {
return false;
}
}
if (hasKeyUpdateCmd()) {
if (!getKeyUpdateCmd().isInitialized()) {
return false;
}
}
if (hasUpgradeCmd()) {
if (!getUpgradeCmd().isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 8: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type value = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
cmdType_ = value;
}
break;
}
case 18: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.newBuilder();
if (hasBalancerCmd()) {
subBuilder.mergeFrom(getBalancerCmd());
}
input.readMessage(subBuilder, extensionRegistry);
setBalancerCmd(subBuilder.buildPartial());
break;
}
case 26: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.newBuilder();
if (hasBlkCmd()) {
subBuilder.mergeFrom(getBlkCmd());
}
input.readMessage(subBuilder, extensionRegistry);
setBlkCmd(subBuilder.buildPartial());
break;
}
case 34: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.newBuilder();
if (hasRecoveryCmd()) {
subBuilder.mergeFrom(getRecoveryCmd());
}
input.readMessage(subBuilder, extensionRegistry);
setRecoveryCmd(subBuilder.buildPartial());
break;
}
case 42: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.newBuilder();
if (hasFinalizeCmd()) {
subBuilder.mergeFrom(getFinalizeCmd());
}
input.readMessage(subBuilder, extensionRegistry);
setFinalizeCmd(subBuilder.buildPartial());
break;
}
case 50: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.newBuilder();
if (hasKeyUpdateCmd()) {
subBuilder.mergeFrom(getKeyUpdateCmd());
}
input.readMessage(subBuilder, extensionRegistry);
setKeyUpdateCmd(subBuilder.buildPartial());
break;
}
case 58: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.newBuilder();
if (hasRegisterCmd()) {
subBuilder.mergeFrom(getRegisterCmd());
}
input.readMessage(subBuilder, extensionRegistry);
setRegisterCmd(subBuilder.buildPartial());
break;
}
case 66: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.newBuilder();
if (hasUpgradeCmd()) {
subBuilder.mergeFrom(getUpgradeCmd());
}
input.readMessage(subBuilder, extensionRegistry);
setUpgradeCmd(subBuilder.buildPartial());
break;
}
}
}
}
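// Tag arithmetic for the switch above: tag == (fieldNumber << 3) | wireType.
// cmdType is field 1 with varint wire type 0, so (1 << 3) | 0 == 8 (case 8);
// balancerCmd is field 2 with length-delimited wire type 2, so
// (2 << 3) | 2 == 18 (case 18), and so on through field 8 (case 66). Tag 0
// marks end of input, and unrecognized tags go through parseUnknownField so
// they survive reserialization.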
private int bitField0_;
// required .DatanodeCommandProto.Type cmdType = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type cmdType_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.BalancerBandwidthCommand;
public boolean hasCmdType() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type getCmdType() {
return cmdType_;
}
public Builder setCmdType(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
cmdType_ = value;
onChanged();
return this;
}
public Builder clearCmdType() {
bitField0_ = (bitField0_ & ~0x00000001);
cmdType_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.BalancerBandwidthCommand;
onChanged();
return this;
}
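// Presence sketch: clearCmdType() drops the presence bit but restores the
// default constant, so after
//   builder.clearCmdType();
// hasCmdType() is false while getCmdType() still returns
// Type.BalancerBandwidthCommand.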
// optional .BalancerBandwidthCommandProto balancerCmd = 2;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto balancerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder> balancerCmdBuilder_;
public boolean hasBalancerCmd() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto getBalancerCmd() {
if (balancerCmdBuilder_ == null) {
return balancerCmd_;
} else {
return balancerCmdBuilder_.getMessage();
}
}
public Builder setBalancerCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto value) {
if (balancerCmdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
balancerCmd_ = value;
onChanged();
} else {
balancerCmdBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
return this;
}
public Builder setBalancerCmd(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder builderForValue) {
if (balancerCmdBuilder_ == null) {
balancerCmd_ = builderForValue.build();
onChanged();
} else {
balancerCmdBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
return this;
}
public Builder mergeBalancerCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto value) {
if (balancerCmdBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002) &&
balancerCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance()) {
balancerCmd_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.newBuilder(balancerCmd_).mergeFrom(value).buildPartial();
} else {
balancerCmd_ = value;
}
onChanged();
} else {
balancerCmdBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
return this;
}
public Builder clearBalancerCmd() {
if (balancerCmdBuilder_ == null) {
balancerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance();
onChanged();
} else {
balancerCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder getBalancerCmdBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getBalancerCmdFieldBuilder().getBuilder();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder getBalancerCmdOrBuilder() {
if (balancerCmdBuilder_ != null) {
return balancerCmdBuilder_.getMessageOrBuilder();
} else {
return balancerCmd_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder>
getBalancerCmdFieldBuilder() {
if (balancerCmdBuilder_ == null) {
balancerCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder>(
balancerCmd_,
getParentForChildren(),
isClean());
balancerCmd_ = null;
}
return balancerCmdBuilder_;
}
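// Nested-builder sketch (setBandwidth(long) is the standard generated setter
// on BalancerBandwidthCommandProto.Builder, assumed here): calling
// getBalancerCmdBuilder() above lazily swaps the plain message for a
// SingleFieldBuilder whose edits write through to this parent builder,
// setting the presence bit and firing onChanged():
//   builder.getBalancerCmdBuilder().setBandwidth(10L * 1024 * 1024);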
// optional .BlockCommandProto blkCmd = 3;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto blkCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder> blkCmdBuilder_;
public boolean hasBlkCmd() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto getBlkCmd() {
if (blkCmdBuilder_ == null) {
return blkCmd_;
} else {
return blkCmdBuilder_.getMessage();
}
}
public Builder setBlkCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto value) {
if (blkCmdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
blkCmd_ = value;
onChanged();
} else {
blkCmdBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
return this;
}
public Builder setBlkCmd(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder builderForValue) {
if (blkCmdBuilder_ == null) {
blkCmd_ = builderForValue.build();
onChanged();
} else {
blkCmdBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
return this;
}
public Builder mergeBlkCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto value) {
if (blkCmdBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004) &&
blkCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance()) {
blkCmd_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.newBuilder(blkCmd_).mergeFrom(value).buildPartial();
} else {
blkCmd_ = value;
}
onChanged();
} else {
blkCmdBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
return this;
}
public Builder clearBlkCmd() {
if (blkCmdBuilder_ == null) {
blkCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance();
onChanged();
} else {
blkCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder getBlkCmdBuilder() {
bitField0_ |= 0x00000004;
onChanged();
return getBlkCmdFieldBuilder().getBuilder();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder getBlkCmdOrBuilder() {
if (blkCmdBuilder_ != null) {
return blkCmdBuilder_.getMessageOrBuilder();
} else {
return blkCmd_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder>
getBlkCmdFieldBuilder() {
if (blkCmdBuilder_ == null) {
blkCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder>(
blkCmd_,
getParentForChildren(),
isClean());
blkCmd_ = null;
}
return blkCmdBuilder_;
}
// optional .BlockRecoveryCommandProto recoveryCmd = 4;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto recoveryCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProtoOrBuilder> recoveryCmdBuilder_;
public boolean hasRecoveryCmd() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto getRecoveryCmd() {
if (recoveryCmdBuilder_ == null) {
return recoveryCmd_;
} else {
return recoveryCmdBuilder_.getMessage();
}
}
public Builder setRecoveryCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto value) {
if (recoveryCmdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
recoveryCmd_ = value;
onChanged();
} else {
recoveryCmdBuilder_.setMessage(value);
}
bitField0_ |= 0x00000008;
return this;
}
public Builder setRecoveryCmd(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder builderForValue) {
if (recoveryCmdBuilder_ == null) {
recoveryCmd_ = builderForValue.build();
onChanged();
} else {
recoveryCmdBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000008;
return this;
}
public Builder mergeRecoveryCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto value) {
if (recoveryCmdBuilder_ == null) {
if (((bitField0_ & 0x00000008) == 0x00000008) &&
recoveryCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance()) {
recoveryCmd_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.newBuilder(recoveryCmd_).mergeFrom(value).buildPartial();
} else {
recoveryCmd_ = value;
}
onChanged();
} else {
recoveryCmdBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000008;
return this;
}
public Builder clearRecoveryCmd() {
if (recoveryCmdBuilder_ == null) {
recoveryCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance();
onChanged();
} else {
recoveryCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder getRecoveryCmdBuilder() {
bitField0_ |= 0x00000008;
onChanged();
return getRecoveryCmdFieldBuilder().getBuilder();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProtoOrBuilder getRecoveryCmdOrBuilder() {
if (recoveryCmdBuilder_ != null) {
return recoveryCmdBuilder_.getMessageOrBuilder();
} else {
return recoveryCmd_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProtoOrBuilder>
getRecoveryCmdFieldBuilder() {
if (recoveryCmdBuilder_ == null) {
recoveryCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProtoOrBuilder>(
recoveryCmd_,
getParentForChildren(),
isClean());
recoveryCmd_ = null;
}
return recoveryCmdBuilder_;
}
// optional .FinalizeCommandProto finalizeCmd = 5;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto finalizeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder> finalizeCmdBuilder_;
public boolean hasFinalizeCmd() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto getFinalizeCmd() {
if (finalizeCmdBuilder_ == null) {
return finalizeCmd_;
} else {
return finalizeCmdBuilder_.getMessage();
}
}
public Builder setFinalizeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto value) {
if (finalizeCmdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
finalizeCmd_ = value;
onChanged();
} else {
finalizeCmdBuilder_.setMessage(value);
}
bitField0_ |= 0x00000010;
return this;
}
public Builder setFinalizeCmd(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder builderForValue) {
if (finalizeCmdBuilder_ == null) {
finalizeCmd_ = builderForValue.build();
onChanged();
} else {
finalizeCmdBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000010;
return this;
}
public Builder mergeFinalizeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto value) {
if (finalizeCmdBuilder_ == null) {
if (((bitField0_ & 0x00000010) == 0x00000010) &&
finalizeCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance()) {
finalizeCmd_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.newBuilder(finalizeCmd_).mergeFrom(value).buildPartial();
} else {
finalizeCmd_ = value;
}
onChanged();
} else {
finalizeCmdBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000010;
return this;
}
public Builder clearFinalizeCmd() {
if (finalizeCmdBuilder_ == null) {
finalizeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance();
onChanged();
} else {
finalizeCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder getFinalizeCmdBuilder() {
bitField0_ |= 0x00000010;
onChanged();
return getFinalizeCmdFieldBuilder().getBuilder();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder getFinalizeCmdOrBuilder() {
if (finalizeCmdBuilder_ != null) {
return finalizeCmdBuilder_.getMessageOrBuilder();
} else {
return finalizeCmd_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder>
getFinalizeCmdFieldBuilder() {
if (finalizeCmdBuilder_ == null) {
finalizeCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder>(
finalizeCmd_,
getParentForChildren(),
isClean());
finalizeCmd_ = null;
}
return finalizeCmdBuilder_;
}
// optional .KeyUpdateCommandProto keyUpdateCmd = 6;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder> keyUpdateCmdBuilder_;
public boolean hasKeyUpdateCmd() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto getKeyUpdateCmd() {
if (keyUpdateCmdBuilder_ == null) {
return keyUpdateCmd_;
} else {
return keyUpdateCmdBuilder_.getMessage();
}
}
public Builder setKeyUpdateCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto value) {
if (keyUpdateCmdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
keyUpdateCmd_ = value;
onChanged();
} else {
keyUpdateCmdBuilder_.setMessage(value);
}
bitField0_ |= 0x00000020;
return this;
}
public Builder setKeyUpdateCmd(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder builderForValue) {
if (keyUpdateCmdBuilder_ == null) {
keyUpdateCmd_ = builderForValue.build();
onChanged();
} else {
keyUpdateCmdBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000020;
return this;
}
public Builder mergeKeyUpdateCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto value) {
if (keyUpdateCmdBuilder_ == null) {
if (((bitField0_ & 0x00000020) == 0x00000020) &&
keyUpdateCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance()) {
keyUpdateCmd_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.newBuilder(keyUpdateCmd_).mergeFrom(value).buildPartial();
} else {
keyUpdateCmd_ = value;
}
onChanged();
} else {
keyUpdateCmdBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000020;
return this;
}
public Builder clearKeyUpdateCmd() {
if (keyUpdateCmdBuilder_ == null) {
keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
onChanged();
} else {
keyUpdateCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000020);
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder getKeyUpdateCmdBuilder() {
bitField0_ |= 0x00000020;
onChanged();
return getKeyUpdateCmdFieldBuilder().getBuilder();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder getKeyUpdateCmdOrBuilder() {
if (keyUpdateCmdBuilder_ != null) {
return keyUpdateCmdBuilder_.getMessageOrBuilder();
} else {
return keyUpdateCmd_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder>
getKeyUpdateCmdFieldBuilder() {
if (keyUpdateCmdBuilder_ == null) {
keyUpdateCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder>(
keyUpdateCmd_,
getParentForChildren(),
isClean());
keyUpdateCmd_ = null;
}
return keyUpdateCmdBuilder_;
}
// optional .RegisterCommandProto registerCmd = 7;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto registerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder> registerCmdBuilder_;
public boolean hasRegisterCmd() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto getRegisterCmd() {
if (registerCmdBuilder_ == null) {
return registerCmd_;
} else {
return registerCmdBuilder_.getMessage();
}
}
public Builder setRegisterCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto value) {
if (registerCmdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
registerCmd_ = value;
onChanged();
} else {
registerCmdBuilder_.setMessage(value);
}
bitField0_ |= 0x00000040;
return this;
}
public Builder setRegisterCmd(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder builderForValue) {
if (registerCmdBuilder_ == null) {
registerCmd_ = builderForValue.build();
onChanged();
} else {
registerCmdBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000040;
return this;
}
public Builder mergeRegisterCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto value) {
if (registerCmdBuilder_ == null) {
if (((bitField0_ & 0x00000040) == 0x00000040) &&
registerCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance()) {
registerCmd_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.newBuilder(registerCmd_).mergeFrom(value).buildPartial();
} else {
registerCmd_ = value;
}
onChanged();
} else {
registerCmdBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000040;
return this;
}
public Builder clearRegisterCmd() {
if (registerCmdBuilder_ == null) {
registerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance();
onChanged();
} else {
registerCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000040);
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder getRegisterCmdBuilder() {
bitField0_ |= 0x00000040;
onChanged();
return getRegisterCmdFieldBuilder().getBuilder();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder getRegisterCmdOrBuilder() {
if (registerCmdBuilder_ != null) {
return registerCmdBuilder_.getMessageOrBuilder();
} else {
return registerCmd_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder>
getRegisterCmdFieldBuilder() {
if (registerCmdBuilder_ == null) {
registerCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder>(
registerCmd_,
getParentForChildren(),
isClean());
registerCmd_ = null;
}
return registerCmdBuilder_;
}
// optional .UpgradeCommandProto upgradeCmd = 8;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto upgradeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder> upgradeCmdBuilder_;
public boolean hasUpgradeCmd() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getUpgradeCmd() {
if (upgradeCmdBuilder_ == null) {
return upgradeCmd_;
} else {
return upgradeCmdBuilder_.getMessage();
}
}
public Builder setUpgradeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto value) {
if (upgradeCmdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
upgradeCmd_ = value;
onChanged();
} else {
upgradeCmdBuilder_.setMessage(value);
}
bitField0_ |= 0x00000080;
return this;
}
public Builder setUpgradeCmd(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder builderForValue) {
if (upgradeCmdBuilder_ == null) {
upgradeCmd_ = builderForValue.build();
onChanged();
} else {
upgradeCmdBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000080;
return this;
}
public Builder mergeUpgradeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto value) {
if (upgradeCmdBuilder_ == null) {
if (((bitField0_ & 0x00000080) == 0x00000080) &&
upgradeCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance()) {
upgradeCmd_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.newBuilder(upgradeCmd_).mergeFrom(value).buildPartial();
} else {
upgradeCmd_ = value;
}
onChanged();
} else {
upgradeCmdBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000080;
return this;
}
public Builder clearUpgradeCmd() {
if (upgradeCmdBuilder_ == null) {
upgradeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance();
onChanged();
} else {
upgradeCmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000080);
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder getUpgradeCmdBuilder() {
bitField0_ |= 0x00000080;
onChanged();
return getUpgradeCmdFieldBuilder().getBuilder();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder getUpgradeCmdOrBuilder() {
if (upgradeCmdBuilder_ != null) {
return upgradeCmdBuilder_.getMessageOrBuilder();
} else {
return upgradeCmd_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder>
getUpgradeCmdFieldBuilder() {
if (upgradeCmdBuilder_ == null) {
upgradeCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder>(
upgradeCmd_,
getParentForChildren(),
isClean());
upgradeCmd_ = null;
}
return upgradeCmdBuilder_;
}
// @@protoc_insertion_point(builder_scope:DatanodeCommandProto)
}
static {
defaultInstance = new DatanodeCommandProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:DatanodeCommandProto)
}
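// End-to-end sketch (toByteArray() is inherited from the protobuf runtime;
// setBandwidth(long) is the standard generated setter, assumed here):
//   DatanodeCommandProto cmd = DatanodeCommandProto.newBuilder()
//       .setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
//       .setBalancerCmd(BalancerBandwidthCommandProto.newBuilder()
//           .setBandwidth(1048576L))  // Builder overload of setBalancerCmd
//       .build();
//   byte[] wire = cmd.toByteArray();
//   DatanodeCommandProto back = DatanodeCommandProto.parseFrom(wire);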
public interface BalancerBandwidthCommandProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required uint64 bandwidth = 1;
boolean hasBandwidth();
long getBandwidth();
}
public static final class BalancerBandwidthCommandProto extends
com.google.protobuf.GeneratedMessage
implements BalancerBandwidthCommandProtoOrBuilder {
// Use BalancerBandwidthCommandProto.newBuilder() to construct.
private BalancerBandwidthCommandProto(Builder builder) {
super(builder);
}
private BalancerBandwidthCommandProto(boolean noInit) {}
private static final BalancerBandwidthCommandProto defaultInstance;
public static BalancerBandwidthCommandProto getDefaultInstance() {
return defaultInstance;
}
public BalancerBandwidthCommandProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BalancerBandwidthCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BalancerBandwidthCommandProto_fieldAccessorTable;
}
private int bitField0_;
// required uint64 bandwidth = 1;
public static final int BANDWIDTH_FIELD_NUMBER = 1;
private long bandwidth_;
public boolean hasBandwidth() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public long getBandwidth() {
return bandwidth_;
}
private void initFields() {
bandwidth_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasBandwidth()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt64(1, bandwidth_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(1, bandwidth_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
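// Worked size example: field 1 costs one tag byte ((1 << 3) | 0 == 8) plus a
// base-128 varint; bandwidth_ = 1048576 (2^20, 21 significant bits) needs
// ceil(21 / 7) == 3 varint bytes, so computeUInt64Size(1, 1048576L) == 4.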
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto) obj;
boolean result = true;
result = result && (hasBandwidth() == other.hasBandwidth());
if (hasBandwidth()) {
result = result && (getBandwidth()
== other.getBandwidth());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasBandwidth()) {
hash = (37 * hash) + BANDWIDTH_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getBandwidth());
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BalancerBandwidthCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BalancerBandwidthCommandProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
bandwidth_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.bandwidth_ = bandwidth_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance()) return this;
if (other.hasBandwidth()) {
setBandwidth(other.getBandwidth());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasBandwidth()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
bandwidth_ = input.readUInt64();
break;
}
}
}
}
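// Note on the case labels above: a protobuf wire tag is
// (field_number << 3) | wire_type, so the required uint64 bandwidth = 1
// field (varint wire type 0) arrives as tag (1 << 3) | 0 = 8, which is
// why "case 8" reads the bandwidth value.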
private int bitField0_;
// required uint64 bandwidth = 1;
private long bandwidth_ ;
public boolean hasBandwidth() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public long getBandwidth() {
return bandwidth_;
}
public Builder setBandwidth(long value) {
bitField0_ |= 0x00000001;
bandwidth_ = value;
onChanged();
return this;
}
public Builder clearBandwidth() {
bitField0_ = (bitField0_ & ~0x00000001);
bandwidth_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:BalancerBandwidthCommandProto)
}
static {
defaultInstance = new BalancerBandwidthCommandProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:BalancerBandwidthCommandProto)
}
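// Usage sketch (illustrative, not part of the generated file): building,
// serializing, and re-parsing a BalancerBandwidthCommandProto; the 10 MB/s
// value is an arbitrary example.
//
//   BalancerBandwidthCommandProto cmd =
//       BalancerBandwidthCommandProto.newBuilder()
//           .setBandwidth(10L * 1024 * 1024)  // bytes per second
//           .build();  // would throw if the required field were left unset
//   byte[] wire = cmd.toByteArray();
//   BalancerBandwidthCommandProto roundTrip =
//       BalancerBandwidthCommandProto.parseFrom(wire);
//   assert roundTrip.getBandwidth() == cmd.getBandwidth();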
public interface BlockCommandProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .BlockCommandProto.Action action = 1;
boolean hasAction();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action getAction();
// required string blockPoolId = 2;
boolean hasBlockPoolId();
String getBlockPoolId();
// repeated .BlockProto blocks = 3;
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto>
getBlocksList();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index);
int getBlocksCount();
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>
getBlocksOrBuilderList();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder(
int index);
// repeated .DatanodeInfosProto targets = 4;
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto>
getTargetsList();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getTargets(int index);
int getTargetsCount();
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder>
getTargetsOrBuilderList();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder getTargetsOrBuilder(
int index);
}
public static final class BlockCommandProto extends
com.google.protobuf.GeneratedMessage
implements BlockCommandProtoOrBuilder {
// Use BlockCommandProto.newBuilder() to construct.
private BlockCommandProto(Builder builder) {
super(builder);
}
private BlockCommandProto(boolean noInit) {}
private static final BlockCommandProto defaultInstance;
public static BlockCommandProto getDefaultInstance() {
return defaultInstance;
}
public BlockCommandProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockCommandProto_fieldAccessorTable;
}
public enum Action
implements com.google.protobuf.ProtocolMessageEnum {
TRANSFER(0, 1),
INVALIDATE(1, 2),
SHUTDOWN(2, 3),
;
public static final int TRANSFER_VALUE = 1;
public static final int INVALIDATE_VALUE = 2;
public static final int SHUTDOWN_VALUE = 3;
public final int getNumber() { return value; }
public static Action valueOf(int value) {
switch (value) {
case 1: return TRANSFER;
case 2: return INVALIDATE;
case 3: return SHUTDOWN;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<Action>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<Action>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<Action>() {
public Action findValueByNumber(int number) {
return Action.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDescriptor().getEnumTypes().get(0);
}
private static final Action[] VALUES = {
TRANSFER, INVALIDATE, SHUTDOWN,
};
public static Action valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private Action(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:BlockCommandProto.Action)
}
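// Mapping sketch (illustrative): each constant's first constructor argument
// is its descriptor index, the second is the wire value from the .proto file.
//
//   BlockCommandProto.Action a = BlockCommandProto.Action.valueOf(2);
//   // a == INVALIDATE; a.getNumber() == 2
//   BlockCommandProto.Action unknown = BlockCommandProto.Action.valueOf(99);
//   // unknown == null; unrecognized wire values are kept as unknown fields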
private int bitField0_;
// required .BlockCommandProto.Action action = 1;
public static final int ACTION_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action action_;
public boolean hasAction() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action getAction() {
return action_;
}
// required string blockPoolId = 2;
public static final int BLOCKPOOLID_FIELD_NUMBER = 2;
private java.lang.Object blockPoolId_;
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof String) {
return (String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
String s = bs.toStringUtf8();
if (com.google.protobuf.Internal.isValidUtf8(bs)) {
blockPoolId_ = s;
}
return s;
}
}
private com.google.protobuf.ByteString getBlockPoolIdBytes() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8((String) ref);
blockPoolId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
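// The two accessors above implement protobuf's lazy string caching: the
// field holds either a String or a ByteString, and each getter converts to
// the other representation on demand, caching the result (the UTF-8
// validity check guards the String cache).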
// repeated .BlockProto blocks = 3;
public static final int BLOCKS_FIELD_NUMBER = 3;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> blocks_;
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> getBlocksList() {
return blocks_;
}
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>
getBlocksOrBuilderList() {
return blocks_;
}
public int getBlocksCount() {
return blocks_.size();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index) {
return blocks_.get(index);
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder(
int index) {
return blocks_.get(index);
}
// repeated .DatanodeInfosProto targets = 4;
public static final int TARGETS_FIELD_NUMBER = 4;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto> targets_;
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto> getTargetsList() {
return targets_;
}
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder>
getTargetsOrBuilderList() {
return targets_;
}
public int getTargetsCount() {
return targets_.size();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getTargets(int index) {
return targets_.get(index);
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder getTargetsOrBuilder(
int index) {
return targets_.get(index);
}
private void initFields() {
action_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action.TRANSFER;
blockPoolId_ = "";
blocks_ = java.util.Collections.emptyList();
targets_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasAction()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasBlockPoolId()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getBlocksCount(); i++) {
if (!getBlocks(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
for (int i = 0; i < getTargetsCount(); i++) {
if (!getTargets(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, action_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getBlockPoolIdBytes());
}
for (int i = 0; i < blocks_.size(); i++) {
output.writeMessage(3, blocks_.get(i));
}
for (int i = 0; i < targets_.size(); i++) {
output.writeMessage(4, targets_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, action_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getBlockPoolIdBytes());
}
for (int i = 0; i < blocks_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, blocks_.get(i));
}
for (int i = 0; i < targets_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(4, targets_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto) obj;
boolean result = true;
result = result && (hasAction() == other.hasAction());
if (hasAction()) {
result = result &&
(getAction() == other.getAction());
}
result = result && (hasBlockPoolId() == other.hasBlockPoolId());
if (hasBlockPoolId()) {
result = result && getBlockPoolId()
.equals(other.getBlockPoolId());
}
result = result && getBlocksList()
.equals(other.getBlocksList());
result = result && getTargetsList()
.equals(other.getTargetsList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasAction()) {
hash = (37 * hash) + ACTION_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getAction());
}
if (hasBlockPoolId()) {
hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
hash = (53 * hash) + getBlockPoolId().hashCode();
}
if (getBlocksCount() > 0) {
hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
hash = (53 * hash) + getBlocksList().hashCode();
}
if (getTargetsCount() > 0) {
hash = (37 * hash) + TARGETS_FIELD_NUMBER;
hash = (53 * hash) + getTargetsList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockCommandProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getBlocksFieldBuilder();
getTargetsFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
action_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action.TRANSFER;
bitField0_ = (bitField0_ & ~0x00000001);
blockPoolId_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
if (blocksBuilder_ == null) {
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
} else {
blocksBuilder_.clear();
}
if (targetsBuilder_ == null) {
targets_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000008);
} else {
targetsBuilder_.clear();
}
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.action_ = action_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.blockPoolId_ = blockPoolId_;
if (blocksBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004)) {
blocks_ = java.util.Collections.unmodifiableList(blocks_);
bitField0_ = (bitField0_ & ~0x00000004);
}
result.blocks_ = blocks_;
} else {
result.blocks_ = blocksBuilder_.build();
}
if (targetsBuilder_ == null) {
if (((bitField0_ & 0x00000008) == 0x00000008)) {
targets_ = java.util.Collections.unmodifiableList(targets_);
bitField0_ = (bitField0_ & ~0x00000008);
}
result.targets_ = targets_;
} else {
result.targets_ = targetsBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance()) return this;
if (other.hasAction()) {
setAction(other.getAction());
}
if (other.hasBlockPoolId()) {
setBlockPoolId(other.getBlockPoolId());
}
if (blocksBuilder_ == null) {
if (!other.blocks_.isEmpty()) {
if (blocks_.isEmpty()) {
blocks_ = other.blocks_;
bitField0_ = (bitField0_ & ~0x00000004);
} else {
ensureBlocksIsMutable();
blocks_.addAll(other.blocks_);
}
onChanged();
}
} else {
if (!other.blocks_.isEmpty()) {
if (blocksBuilder_.isEmpty()) {
blocksBuilder_.dispose();
blocksBuilder_ = null;
blocks_ = other.blocks_;
bitField0_ = (bitField0_ & ~0x00000004);
blocksBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getBlocksFieldBuilder() : null;
} else {
blocksBuilder_.addAllMessages(other.blocks_);
}
}
}
if (targetsBuilder_ == null) {
if (!other.targets_.isEmpty()) {
if (targets_.isEmpty()) {
targets_ = other.targets_;
bitField0_ = (bitField0_ & ~0x00000008);
} else {
ensureTargetsIsMutable();
targets_.addAll(other.targets_);
}
onChanged();
}
} else {
if (!other.targets_.isEmpty()) {
if (targetsBuilder_.isEmpty()) {
targetsBuilder_.dispose();
targetsBuilder_ = null;
targets_ = other.targets_;
bitField0_ = (bitField0_ & ~0x00000008);
targetsBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getTargetsFieldBuilder() : null;
} else {
targetsBuilder_.addAllMessages(other.targets_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasAction()) {
return false;
}
if (!hasBlockPoolId()) {
return false;
}
for (int i = 0; i < getBlocksCount(); i++) {
if (!getBlocks(i).isInitialized()) {
return false;
}
}
for (int i = 0; i < getTargetsCount(); i++) {
if (!getTargets(i).isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 8: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action value = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
action_ = value;
}
break;
}
case 18: {
bitField0_ |= 0x00000002;
blockPoolId_ = input.readBytes();
break;
}
case 26: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder();
input.readMessage(subBuilder, extensionRegistry);
addBlocks(subBuilder.buildPartial());
break;
}
case 34: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.newBuilder();
input.readMessage(subBuilder, extensionRegistry);
addTargets(subBuilder.buildPartial());
break;
}
}
}
}
private int bitField0_;
// required .BlockCommandProto.Action action = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action action_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action.TRANSFER;
public boolean hasAction() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action getAction() {
return action_;
}
public Builder setAction(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
action_ = value;
onChanged();
return this;
}
public Builder clearAction() {
bitField0_ = (bitField0_ & ~0x00000001);
action_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action.TRANSFER;
onChanged();
return this;
}
// required string blockPoolId = 2;
private java.lang.Object blockPoolId_ = "";
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (!(ref instanceof String)) {
String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
blockPoolId_ = s;
return s;
} else {
return (String) ref;
}
}
public Builder setBlockPoolId(String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
blockPoolId_ = value;
onChanged();
return this;
}
public Builder clearBlockPoolId() {
bitField0_ = (bitField0_ & ~0x00000002);
blockPoolId_ = getDefaultInstance().getBlockPoolId();
onChanged();
return this;
}
void setBlockPoolId(com.google.protobuf.ByteString value) {
bitField0_ |= 0x00000002;
blockPoolId_ = value;
onChanged();
}
// repeated .BlockProto blocks = 3;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> blocks_ =
java.util.Collections.emptyList();
private void ensureBlocksIsMutable() {
if (!((bitField0_ & 0x00000004) == 0x00000004)) {
blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto>(blocks_);
bitField0_ |= 0x00000004;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> blocksBuilder_;
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> getBlocksList() {
if (blocksBuilder_ == null) {
return java.util.Collections.unmodifiableList(blocks_);
} else {
return blocksBuilder_.getMessageList();
}
}
public int getBlocksCount() {
if (blocksBuilder_ == null) {
return blocks_.size();
} else {
return blocksBuilder_.getCount();
}
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index) {
if (blocksBuilder_ == null) {
return blocks_.get(index);
} else {
return blocksBuilder_.getMessage(index);
}
}
public Builder setBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.set(index, value);
onChanged();
} else {
blocksBuilder_.setMessage(index, value);
}
return this;
}
public Builder setBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.set(index, builderForValue.build());
onChanged();
} else {
blocksBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.add(value);
onChanged();
} else {
blocksBuilder_.addMessage(value);
}
return this;
}
public Builder addBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.add(index, value);
onChanged();
} else {
blocksBuilder_.addMessage(index, value);
}
return this;
}
public Builder addBlocks(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.add(builderForValue.build());
onChanged();
} else {
blocksBuilder_.addMessage(builderForValue.build());
}
return this;
}
public Builder addBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.add(index, builderForValue.build());
onChanged();
} else {
blocksBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
public Builder addAllBlocks(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> values) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
super.addAll(values, blocks_);
onChanged();
} else {
blocksBuilder_.addAllMessages(values);
}
return this;
}
public Builder clearBlocks() {
if (blocksBuilder_ == null) {
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
} else {
blocksBuilder_.clear();
}
return this;
}
public Builder removeBlocks(int index) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.remove(index);
onChanged();
} else {
blocksBuilder_.remove(index);
}
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getBlocksBuilder(
int index) {
return getBlocksFieldBuilder().getBuilder(index);
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder(
int index) {
if (blocksBuilder_ == null) {
return blocks_.get(index); } else {
return blocksBuilder_.getMessageOrBuilder(index);
}
}
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>
getBlocksOrBuilderList() {
if (blocksBuilder_ != null) {
return blocksBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(blocks_);
}
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder addBlocksBuilder() {
return getBlocksFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance());
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder addBlocksBuilder(
int index) {
return getBlocksFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance());
}
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder>
getBlocksBuilderList() {
return getBlocksFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>
getBlocksFieldBuilder() {
if (blocksBuilder_ == null) {
blocksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>(
blocks_,
((bitField0_ & 0x00000004) == 0x00000004),
getParentForChildren(),
isClean());
blocks_ = null;
}
return blocksBuilder_;
}
// repeated .DatanodeInfosProto targets = 4;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto> targets_ =
java.util.Collections.emptyList();
private void ensureTargetsIsMutable() {
if (!((bitField0_ & 0x00000008) == 0x00000008)) {
targets_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto>(targets_);
bitField0_ |= 0x00000008;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder> targetsBuilder_;
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto> getTargetsList() {
if (targetsBuilder_ == null) {
return java.util.Collections.unmodifiableList(targets_);
} else {
return targetsBuilder_.getMessageList();
}
}
public int getTargetsCount() {
if (targetsBuilder_ == null) {
return targets_.size();
} else {
return targetsBuilder_.getCount();
}
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getTargets(int index) {
if (targetsBuilder_ == null) {
return targets_.get(index);
} else {
return targetsBuilder_.getMessage(index);
}
}
public Builder setTargets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto value) {
if (targetsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetsIsMutable();
targets_.set(index, value);
onChanged();
} else {
targetsBuilder_.setMessage(index, value);
}
return this;
}
public Builder setTargets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder builderForValue) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.set(index, builderForValue.build());
onChanged();
} else {
targetsBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
public Builder addTargets(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto value) {
if (targetsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetsIsMutable();
targets_.add(value);
onChanged();
} else {
targetsBuilder_.addMessage(value);
}
return this;
}
public Builder addTargets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto value) {
if (targetsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetsIsMutable();
targets_.add(index, value);
onChanged();
} else {
targetsBuilder_.addMessage(index, value);
}
return this;
}
public Builder addTargets(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder builderForValue) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.add(builderForValue.build());
onChanged();
} else {
targetsBuilder_.addMessage(builderForValue.build());
}
return this;
}
public Builder addTargets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder builderForValue) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.add(index, builderForValue.build());
onChanged();
} else {
targetsBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
public Builder addAllTargets(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto> values) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
super.addAll(values, targets_);
onChanged();
} else {
targetsBuilder_.addAllMessages(values);
}
return this;
}
public Builder clearTargets() {
if (targetsBuilder_ == null) {
targets_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000008);
onChanged();
} else {
targetsBuilder_.clear();
}
return this;
}
public Builder removeTargets(int index) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.remove(index);
onChanged();
} else {
targetsBuilder_.remove(index);
}
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder getTargetsBuilder(
int index) {
return getTargetsFieldBuilder().getBuilder(index);
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder getTargetsOrBuilder(
int index) {
if (targetsBuilder_ == null) {
return targets_.get(index); } else {
return targetsBuilder_.getMessageOrBuilder(index);
}
}
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder>
getTargetsOrBuilderList() {
if (targetsBuilder_ != null) {
return targetsBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(targets_);
}
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder addTargetsBuilder() {
return getTargetsFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance());
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder addTargetsBuilder(
int index) {
return getTargetsFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance());
}
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder>
getTargetsBuilderList() {
return getTargetsFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder>
getTargetsFieldBuilder() {
if (targetsBuilder_ == null) {
targetsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder>(
targets_,
((bitField0_ & 0x00000008) == 0x00000008),
getParentForChildren(),
isClean());
targets_ = null;
}
return targetsBuilder_;
}
// @@protoc_insertion_point(builder_scope:BlockCommandProto)
}
static {
defaultInstance = new BlockCommandProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:BlockCommandProto)
}
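// Usage sketch (illustrative, not part of the generated file): populating
// the repeated fields through the builder. BlockProto's own required fields
// are elided; a real caller must set them before build() would succeed, so
// buildPartial() is used here. "BP-example" is a hypothetical pool id.
//
//   BlockCommandProto.Builder b = BlockCommandProto.newBuilder()
//       .setAction(BlockCommandProto.Action.INVALIDATE)
//       .setBlockPoolId("BP-example");
//   b.addBlocksBuilder();            // nested builder for blocks[0]
//   int n = b.getBlocksCount();      // == 1
//   BlockCommandProto cmd = b.buildPartial();  // skips required-field check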
public interface BlockRecoveryCommandProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// repeated .RecoveringBlockProto blocks = 1;
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto>
getBlocksList();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto getBlocks(int index);
int getBlocksCount();
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder>
getBlocksOrBuilderList();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder getBlocksOrBuilder(
int index);
}
public static final class BlockRecoveryCommandProto extends
com.google.protobuf.GeneratedMessage
implements BlockRecoveryCommandProtoOrBuilder {
// Use BlockRecoveryCommandProto.newBuilder() to construct.
private BlockRecoveryCommandProto(Builder builder) {
super(builder);
}
private BlockRecoveryCommandProto(boolean noInit) {}
private static final BlockRecoveryCommandProto defaultInstance;
public static BlockRecoveryCommandProto getDefaultInstance() {
return defaultInstance;
}
public BlockRecoveryCommandProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockRecoveryCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockRecoveryCommandProto_fieldAccessorTable;
}
// repeated .RecoveringBlockProto blocks = 1;
public static final int BLOCKS_FIELD_NUMBER = 1;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto> blocks_;
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto> getBlocksList() {
return blocks_;
}
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder>
getBlocksOrBuilderList() {
return blocks_;
}
public int getBlocksCount() {
return blocks_.size();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto getBlocks(int index) {
return blocks_.get(index);
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder getBlocksOrBuilder(
int index) {
return blocks_.get(index);
}
private void initFields() {
blocks_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
for (int i = 0; i < getBlocksCount(); i++) {
if (!getBlocks(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < blocks_.size(); i++) {
output.writeMessage(1, blocks_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
for (int i = 0; i < blocks_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, blocks_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto) obj;
boolean result = true;
result = result && getBlocksList()
.equals(other.getBlocksList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (getBlocksCount() > 0) {
hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
hash = (53 * hash) + getBlocksList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockRecoveryCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockRecoveryCommandProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getBlocksFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (blocksBuilder_ == null) {
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
} else {
blocksBuilder_.clear();
}
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto(this);
int from_bitField0_ = bitField0_;
if (blocksBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001)) {
blocks_ = java.util.Collections.unmodifiableList(blocks_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.blocks_ = blocks_;
} else {
result.blocks_ = blocksBuilder_.build();
}
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance()) return this;
if (blocksBuilder_ == null) {
if (!other.blocks_.isEmpty()) {
if (blocks_.isEmpty()) {
blocks_ = other.blocks_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureBlocksIsMutable();
blocks_.addAll(other.blocks_);
}
onChanged();
}
} else {
if (!other.blocks_.isEmpty()) {
if (blocksBuilder_.isEmpty()) {
blocksBuilder_.dispose();
blocksBuilder_ = null;
blocks_ = other.blocks_;
bitField0_ = (bitField0_ & ~0x00000001);
blocksBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getBlocksFieldBuilder() : null;
} else {
blocksBuilder_.addAllMessages(other.blocks_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
for (int i = 0; i < getBlocksCount(); i++) {
if (!getBlocks(i).isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.newBuilder();
input.readMessage(subBuilder, extensionRegistry);
addBlocks(subBuilder.buildPartial());
break;
}
}
}
}
private int bitField0_;
// repeated .RecoveringBlockProto blocks = 1;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto> blocks_ =
java.util.Collections.emptyList();
private void ensureBlocksIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto>(blocks_);
bitField0_ |= 0x00000001;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder> blocksBuilder_;
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto> getBlocksList() {
if (blocksBuilder_ == null) {
return java.util.Collections.unmodifiableList(blocks_);
} else {
return blocksBuilder_.getMessageList();
}
}
public int getBlocksCount() {
if (blocksBuilder_ == null) {
return blocks_.size();
} else {
return blocksBuilder_.getCount();
}
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto getBlocks(int index) {
if (blocksBuilder_ == null) {
return blocks_.get(index);
} else {
return blocksBuilder_.getMessage(index);
}
}
public Builder setBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.set(index, value);
onChanged();
} else {
blocksBuilder_.setMessage(index, value);
}
return this;
}
public Builder setBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.set(index, builderForValue.build());
onChanged();
} else {
blocksBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.add(value);
onChanged();
} else {
blocksBuilder_.addMessage(value);
}
return this;
}
public Builder addBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.add(index, value);
onChanged();
} else {
blocksBuilder_.addMessage(index, value);
}
return this;
}
public Builder addBlocks(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.add(builderForValue.build());
onChanged();
} else {
blocksBuilder_.addMessage(builderForValue.build());
}
return this;
}
public Builder addBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.add(index, builderForValue.build());
onChanged();
} else {
blocksBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
public Builder addAllBlocks(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto> values) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
super.addAll(values, blocks_);
onChanged();
} else {
blocksBuilder_.addAllMessages(values);
}
return this;
}
public Builder clearBlocks() {
if (blocksBuilder_ == null) {
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
} else {
blocksBuilder_.clear();
}
return this;
}
public Builder removeBlocks(int index) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.remove(index);
onChanged();
} else {
blocksBuilder_.remove(index);
}
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder getBlocksBuilder(
int index) {
return getBlocksFieldBuilder().getBuilder(index);
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder getBlocksOrBuilder(
int index) {
if (blocksBuilder_ == null) {
return blocks_.get(index);
} else {
return blocksBuilder_.getMessageOrBuilder(index);
}
}
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder>
getBlocksOrBuilderList() {
if (blocksBuilder_ != null) {
return blocksBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(blocks_);
}
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder addBlocksBuilder() {
return getBlocksFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDefaultInstance());
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder addBlocksBuilder(
int index) {
return getBlocksFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDefaultInstance());
}
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder>
getBlocksBuilderList() {
return getBlocksFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder>
getBlocksFieldBuilder() {
if (blocksBuilder_ == null) {
blocksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder>(
blocks_,
((bitField0_ & 0x00000001) == 0x00000001),
getParentForChildren(),
isClean());
blocks_ = null;
}
return blocksBuilder_;
}
// @@protoc_insertion_point(builder_scope:BlockRecoveryCommandProto)
}
static {
defaultInstance = new BlockRecoveryCommandProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:BlockRecoveryCommandProto)
}
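// --- Illustrative usage (not part of the generated output) ---
// A minimal sketch of driving the repeated "blocks" field through the builder
// API above; the method name and the incoming list are assumptions made for
// this example, not names taken from DatanodeProtocol.proto.
private static BlockRecoveryCommandProto exampleBuildBlockRecoveryCommand(
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto> recoveringBlocks) {
// addAllBlocks copies every element into repeated field 1; build() throws
// if any contained RecoveringBlockProto is missing a required field.
return BlockRecoveryCommandProto.newBuilder()
.addAllBlocks(recoveringBlocks)
.build();
}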
public interface FinalizeCommandProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required string blockPoolId = 1;
boolean hasBlockPoolId();
String getBlockPoolId();
}
public static final class FinalizeCommandProto extends
com.google.protobuf.GeneratedMessage
implements FinalizeCommandProtoOrBuilder {
// Use FinalizeCommandProto.newBuilder() to construct.
private FinalizeCommandProto(Builder builder) {
super(builder);
}
private FinalizeCommandProto(boolean noInit) {}
private static final FinalizeCommandProto defaultInstance;
public static FinalizeCommandProto getDefaultInstance() {
return defaultInstance;
}
public FinalizeCommandProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_FinalizeCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_FinalizeCommandProto_fieldAccessorTable;
}
private int bitField0_;
// required string blockPoolId = 1;
public static final int BLOCKPOOLID_FIELD_NUMBER = 1;
private java.lang.Object blockPoolId_;
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof String) {
return (String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
String s = bs.toStringUtf8();
if (com.google.protobuf.Internal.isValidUtf8(bs)) {
blockPoolId_ = s;
}
return s;
}
}
private com.google.protobuf.ByteString getBlockPoolIdBytes() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8((String) ref);
blockPoolId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
blockPoolId_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasBlockPoolId()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getBlockPoolIdBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getBlockPoolIdBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto) obj;
boolean result = true;
result = result && (hasBlockPoolId() == other.hasBlockPoolId());
if (hasBlockPoolId()) {
result = result && getBlockPoolId()
.equals(other.getBlockPoolId());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasBlockPoolId()) {
hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
hash = (53 * hash) + getBlockPoolId().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_FinalizeCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_FinalizeCommandProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
blockPoolId_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.blockPoolId_ = blockPoolId_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance()) return this;
if (other.hasBlockPoolId()) {
setBlockPoolId(other.getBlockPoolId());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasBlockPoolId()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
blockPoolId_ = input.readBytes();
break;
}
}
}
}
private int bitField0_;
// required string blockPoolId = 1;
private java.lang.Object blockPoolId_ = "";
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (!(ref instanceof String)) {
String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
blockPoolId_ = s;
return s;
} else {
return (String) ref;
}
}
public Builder setBlockPoolId(String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
blockPoolId_ = value;
onChanged();
return this;
}
public Builder clearBlockPoolId() {
bitField0_ = (bitField0_ & ~0x00000001);
blockPoolId_ = getDefaultInstance().getBlockPoolId();
onChanged();
return this;
}
void setBlockPoolId(com.google.protobuf.ByteString value) {
bitField0_ |= 0x00000001;
blockPoolId_ = value;
onChanged();
}
// @@protoc_insertion_point(builder_scope:FinalizeCommandProto)
}
static {
defaultInstance = new FinalizeCommandProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:FinalizeCommandProto)
}
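// --- Illustrative usage (not part of the generated output) ---
// A hedged round-trip sketch for the single required string field; the block
// pool id literal below is a placeholder, not a value from the source.
private static FinalizeCommandProto exampleFinalizeCommandRoundTrip()
throws com.google.protobuf.InvalidProtocolBufferException {
FinalizeCommandProto cmd = FinalizeCommandProto.newBuilder()
.setBlockPoolId("BP-example-pool") // placeholder id
.build(); // would throw if the required blockPoolId were unset
byte[] wire = cmd.toByteArray(); // serializes via writeTo()/getSerializedSize()
return FinalizeCommandProto.parseFrom(wire); // equals(cmd) holds after parsing
}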
public interface KeyUpdateCommandProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .ExportedBlockKeysProto keys = 1;
boolean hasKeys();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto getKeys();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder();
}
public static final class KeyUpdateCommandProto extends
com.google.protobuf.GeneratedMessage
implements KeyUpdateCommandProtoOrBuilder {
// Use KeyUpdateCommandProto.newBuilder() to construct.
private KeyUpdateCommandProto(Builder builder) {
super(builder);
}
private KeyUpdateCommandProto(boolean noInit) {}
private static final KeyUpdateCommandProto defaultInstance;
public static KeyUpdateCommandProto getDefaultInstance() {
return defaultInstance;
}
public KeyUpdateCommandProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_KeyUpdateCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_KeyUpdateCommandProto_fieldAccessorTable;
}
private int bitField0_;
// required .ExportedBlockKeysProto keys = 1;
public static final int KEYS_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto keys_;
public boolean hasKeys() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto getKeys() {
return keys_;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder() {
return keys_;
}
private void initFields() {
keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasKeys()) {
memoizedIsInitialized = 0;
return false;
}
if (!getKeys().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, keys_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, keys_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto) obj;
boolean result = true;
result = result && (hasKeys() == other.hasKeys());
if (hasKeys()) {
result = result && getKeys()
.equals(other.getKeys());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasKeys()) {
hash = (37 * hash) + KEYS_FIELD_NUMBER;
hash = (53 * hash) + getKeys().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_KeyUpdateCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_KeyUpdateCommandProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getKeysFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (keysBuilder_ == null) {
keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance();
} else {
keysBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (keysBuilder_ == null) {
result.keys_ = keys_;
} else {
result.keys_ = keysBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance()) return this;
if (other.hasKeys()) {
mergeKeys(other.getKeys());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasKeys()) {
return false;
}
if (!getKeys().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.newBuilder();
if (hasKeys()) {
subBuilder.mergeFrom(getKeys());
}
input.readMessage(subBuilder, extensionRegistry);
setKeys(subBuilder.buildPartial());
break;
}
}
}
}
private int bitField0_;
// required .ExportedBlockKeysProto keys = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder> keysBuilder_;
public boolean hasKeys() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto getKeys() {
if (keysBuilder_ == null) {
return keys_;
} else {
return keysBuilder_.getMessage();
}
}
public Builder setKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto value) {
if (keysBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
keys_ = value;
onChanged();
} else {
keysBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder setKeys(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder builderForValue) {
if (keysBuilder_ == null) {
keys_ = builderForValue.build();
onChanged();
} else {
keysBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
public Builder mergeKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto value) {
if (keysBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
keys_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance()) {
keys_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.newBuilder(keys_).mergeFrom(value).buildPartial();
} else {
keys_ = value;
}
onChanged();
} else {
keysBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder clearKeys() {
if (keysBuilder_ == null) {
keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance();
onChanged();
} else {
keysBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder getKeysBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getKeysFieldBuilder().getBuilder();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder() {
if (keysBuilder_ != null) {
return keysBuilder_.getMessageOrBuilder();
} else {
return keys_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder>
getKeysFieldBuilder() {
if (keysBuilder_ == null) {
keysBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder>(
keys_,
getParentForChildren(),
isClean());
keys_ = null;
}
return keysBuilder_;
}
// @@protoc_insertion_point(builder_scope:KeyUpdateCommandProto)
}
static {
defaultInstance = new KeyUpdateCommandProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:KeyUpdateCommandProto)
}
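// --- Illustrative usage (not part of the generated output) ---
// A minimal sketch of populating the required nested message; the incoming
// ExportedBlockKeysProto is assumed to be built elsewhere. setKeys replaces
// the field wholesale, while mergeKeys would combine it field-by-field with
// any value already present in the builder.
private static KeyUpdateCommandProto exampleBuildKeyUpdateCommand(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto keys) {
return KeyUpdateCommandProto.newBuilder()
.setKeys(keys)
.build();
}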
public interface RegisterCommandProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
}
public static final class RegisterCommandProto extends
com.google.protobuf.GeneratedMessage
implements RegisterCommandProtoOrBuilder {
// Use RegisterCommandProto.newBuilder() to construct.
private RegisterCommandProto(Builder builder) {
super(builder);
}
private RegisterCommandProto(boolean noInit) {}
private static final RegisterCommandProto defaultInstance;
public static RegisterCommandProto getDefaultInstance() {
return defaultInstance;
}
public RegisterCommandProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterCommandProto_fieldAccessorTable;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterCommandProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
}
}
}
// @@protoc_insertion_point(builder_scope:RegisterCommandProto)
}
static {
defaultInstance = new RegisterCommandProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:RegisterCommandProto)
}
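// --- Illustrative usage (not part of the generated output) ---
// RegisterCommandProto declares no fields, so the shared default instance is
// the cheapest way to obtain one; it serializes to zero bytes apart from any
// unknown fields carried along.
private static RegisterCommandProto exampleRegisterCommand() {
return RegisterCommandProto.getDefaultInstance();
}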
public interface UpgradeCommandProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .UpgradeCommandProto.Action action = 1;
boolean hasAction();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Action getAction();
// required uint32 version = 2;
boolean hasVersion();
int getVersion();
// required uint32 upgradeStatus = 3;
boolean hasUpgradeStatus();
int getUpgradeStatus();
}
public static final class UpgradeCommandProto extends
com.google.protobuf.GeneratedMessage
implements UpgradeCommandProtoOrBuilder {
// Use UpgradeCommandProto.newBuilder() to construct.
private UpgradeCommandProto(Builder builder) {
super(builder);
}
private UpgradeCommandProto(boolean noInit) {}
private static final UpgradeCommandProto defaultInstance;
public static UpgradeCommandProto getDefaultInstance() {
return defaultInstance;
}
public UpgradeCommandProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_UpgradeCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_UpgradeCommandProto_fieldAccessorTable;
}
public enum Action
implements com.google.protobuf.ProtocolMessageEnum {
UNKNOWN(0, 0),
REPORT_STATUS(1, 100),
START_UPGRADE(2, 101),
;
public static final int UNKNOWN_VALUE = 0;
public static final int REPORT_STATUS_VALUE = 100;
public static final int START_UPGRADE_VALUE = 101;
public final int getNumber() { return value; }
public static Action valueOf(int value) {
switch (value) {
case 0: return UNKNOWN;
case 100: return REPORT_STATUS;
case 101: return START_UPGRADE;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<Action>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<Action>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<Action>() {
public Action findValueByNumber(int number) {
return Action.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDescriptor().getEnumTypes().get(0);
}
private static final Action[] VALUES = {
UNKNOWN, REPORT_STATUS, START_UPGRADE,
};
public static Action valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private Action(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:UpgradeCommandProto.Action)
}
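// --- Illustrative usage (not part of the generated output) ---
// The wire numbers for Action are sparse (0, 100, 101), so valueOf(int)
// returns null for anything else; falling back to UNKNOWN here is an
// assumption about caller policy, not generated behavior.
private static Action exampleDecodeAction(int wireNumber) {
Action action = Action.valueOf(wireNumber);
return action != null ? action : Action.UNKNOWN;
}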
private int bitField0_;
// required .UpgradeCommandProto.Action action = 1;
public static final int ACTION_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Action action_;
public boolean hasAction() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Action getAction() {
return action_;
}
// required uint32 version = 2;
public static final int VERSION_FIELD_NUMBER = 2;
private int version_;
public boolean hasVersion() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public int getVersion() {
return version_;
}
// required uint32 upgradeStatus = 3;
public static final int UPGRADESTATUS_FIELD_NUMBER = 3;
private int upgradeStatus_;
public boolean hasUpgradeStatus() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
public int getUpgradeStatus() {
return upgradeStatus_;
}
private void initFields() {
action_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Action.UNKNOWN;
version_ = 0;
upgradeStatus_ = 0;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasAction()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasVersion()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasUpgradeStatus()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, action_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt32(2, version_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt32(3, upgradeStatus_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, action_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(2, version_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(3, upgradeStatus_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto) obj;
boolean result = true;
result = result && (hasAction() == other.hasAction());
if (hasAction()) {
result = result &&
(getAction() == other.getAction());
}
result = result && (hasVersion() == other.hasVersion());
if (hasVersion()) {
result = result && (getVersion()
== other.getVersion());
}
result = result && (hasUpgradeStatus() == other.hasUpgradeStatus());
if (hasUpgradeStatus()) {
result = result && (getUpgradeStatus()
== other.getUpgradeStatus());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasAction()) {
hash = (37 * hash) + ACTION_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getAction());
}
if (hasVersion()) {
hash = (37 * hash) + VERSION_FIELD_NUMBER;
hash = (53 * hash) + getVersion();
}
if (hasUpgradeStatus()) {
hash = (37 * hash) + UPGRADESTATUS_FIELD_NUMBER;
hash = (53 * hash) + getUpgradeStatus();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_UpgradeCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_UpgradeCommandProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
action_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Action.UNKNOWN;
bitField0_ = (bitField0_ & ~0x00000001);
version_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
upgradeStatus_ = 0;
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.action_ = action_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.version_ = version_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.upgradeStatus_ = upgradeStatus_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance()) return this;
if (other.hasAction()) {
setAction(other.getAction());
}
if (other.hasVersion()) {
setVersion(other.getVersion());
}
if (other.hasUpgradeStatus()) {
setUpgradeStatus(other.getUpgradeStatus());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasAction()) {
return false;
}
if (!hasVersion()) {
return false;
}
if (!hasUpgradeStatus()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
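// Wire-format note: each tag is (fieldNumber << 3) | wireType, so here
// 8 = field 1 as a varint (action), 16 = field 2 as a varint (version),
// 24 = field 3 as a varint (upgradeStatus), and 0 marks end of input.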
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 8: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Action value = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Action.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
action_ = value;
}
break;
}
case 16: {
bitField0_ |= 0x00000002;
version_ = input.readUInt32();
break;
}
case 24: {
bitField0_ |= 0x00000004;
upgradeStatus_ = input.readUInt32();
break;
}
}
}
}
private int bitField0_;
// required .UpgradeCommandProto.Action action = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Action action_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Action.UNKNOWN;
public boolean hasAction() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Action getAction() {
return action_;
}
public Builder setAction(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Action value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
action_ = value;
onChanged();
return this;
}
public Builder clearAction() {
bitField0_ = (bitField0_ & ~0x00000001);
action_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Action.UNKNOWN;
onChanged();
return this;
}
// required uint32 version = 2;
private int version_ ;
public boolean hasVersion() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public int getVersion() {
return version_;
}
public Builder setVersion(int value) {
bitField0_ |= 0x00000002;
version_ = value;
onChanged();
return this;
}
public Builder clearVersion() {
bitField0_ = (bitField0_ & ~0x00000002);
version_ = 0;
onChanged();
return this;
}
// required uint32 upgradeStatus = 3;
private int upgradeStatus_ ;
public boolean hasUpgradeStatus() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
public int getUpgradeStatus() {
return upgradeStatus_;
}
public Builder setUpgradeStatus(int value) {
bitField0_ |= 0x00000004;
upgradeStatus_ = value;
onChanged();
return this;
}
public Builder clearUpgradeStatus() {
bitField0_ = (bitField0_ & ~0x00000004);
upgradeStatus_ = 0;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:UpgradeCommandProto)
}
static {
defaultInstance = new UpgradeCommandProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:UpgradeCommandProto)
}
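// A minimal usage sketch for UpgradeCommandProto (hypothetical caller code,
// not generated output; assumes only the 2.4-era API shown in this file).
// All three fields are required, so build() throws if any setter is skipped:
//
//   UpgradeCommandProto cmd = UpgradeCommandProto.newBuilder()
//       .setAction(UpgradeCommandProto.Action.UNKNOWN)
//       .setVersion(1)
//       .setUpgradeStatus(42)   // illustrative value
//       .build();
//   byte[] bytes = cmd.toByteArray();
//   UpgradeCommandProto roundTrip = UpgradeCommandProto.parseFrom(bytes);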
public interface RegisterDatanodeRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .DatanodeRegistrationProto registration = 1;
boolean hasRegistration();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder();
}
public static final class RegisterDatanodeRequestProto extends
com.google.protobuf.GeneratedMessage
implements RegisterDatanodeRequestProtoOrBuilder {
// Use RegisterDatanodeRequestProto.newBuilder() to construct.
private RegisterDatanodeRequestProto(Builder builder) {
super(builder);
}
private RegisterDatanodeRequestProto(boolean noInit) {}
private static final RegisterDatanodeRequestProto defaultInstance;
public static RegisterDatanodeRequestProto getDefaultInstance() {
return defaultInstance;
}
public RegisterDatanodeRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterDatanodeRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterDatanodeRequestProto_fieldAccessorTable;
}
private int bitField0_;
// required .DatanodeRegistrationProto registration = 1;
public static final int REGISTRATION_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_;
public boolean hasRegistration() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
return registration_;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
return registration_;
}
private void initFields() {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasRegistration()) {
memoizedIsInitialized = 0;
return false;
}
if (!getRegistration().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, registration_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, registration_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto) obj;
boolean result = true;
result = result && (hasRegistration() == other.hasRegistration());
if (hasRegistration()) {
result = result && getRegistration()
.equals(other.getRegistration());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasRegistration()) {
hash = (37 * hash) + REGISTRATION_FIELD_NUMBER;
hash = (53 * hash) + getRegistration().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterDatanodeRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterDatanodeRequestProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getRegistrationFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (registrationBuilder_ == null) {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
} else {
registrationBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (registrationBuilder_ == null) {
result.registration_ = registration_;
} else {
result.registration_ = registrationBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.getDefaultInstance()) return this;
if (other.hasRegistration()) {
mergeRegistration(other.getRegistration());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasRegistration()) {
return false;
}
if (!getRegistration().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 10: {
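// 10 = (1 << 3) | 2: field 1 as a length-delimited (embedded message) value.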
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder();
if (hasRegistration()) {
subBuilder.mergeFrom(getRegistration());
}
input.readMessage(subBuilder, extensionRegistry);
setRegistration(subBuilder.buildPartial());
break;
}
}
}
}
private int bitField0_;
// required .DatanodeRegistrationProto registration = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registrationBuilder_;
public boolean hasRegistration() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
if (registrationBuilder_ == null) {
return registration_;
} else {
return registrationBuilder_.getMessage();
}
}
public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
if (registrationBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
registration_ = value;
onChanged();
} else {
registrationBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder setRegistration(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) {
if (registrationBuilder_ == null) {
registration_ = builderForValue.build();
onChanged();
} else {
registrationBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
if (registrationBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
registration_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) {
registration_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial();
} else {
registration_ = value;
}
onChanged();
} else {
registrationBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder clearRegistration() {
if (registrationBuilder_ == null) {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
onChanged();
} else {
registrationBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistrationBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getRegistrationFieldBuilder().getBuilder();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
if (registrationBuilder_ != null) {
return registrationBuilder_.getMessageOrBuilder();
} else {
return registration_;
}
}
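// The SingleFieldBuilder below is created lazily; once it exists, the plain
// registration_ field is nulled out and all access goes through the builder.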
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>
getRegistrationFieldBuilder() {
if (registrationBuilder_ == null) {
registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>(
registration_,
getParentForChildren(),
isClean());
registration_ = null;
}
return registrationBuilder_;
}
// @@protoc_insertion_point(builder_scope:RegisterDatanodeRequestProto)
}
static {
defaultInstance = new RegisterDatanodeRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:RegisterDatanodeRequestProto)
}
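// Sketch of nested-message handling for the request above (hypothetical
// caller code). setRegistration() replaces the field outright, while
// mergeRegistration() folds the set fields of its argument into any value
// already present, as implemented in the Builder above:
//
//   RegisterDatanodeRequestProto req = RegisterDatanodeRequestProto.newBuilder()
//       .setRegistration(registration)   // a prebuilt DatanodeRegistrationProto
//       .build();
//   // Read-only access without forcing a builder or a copy:
//   DatanodeRegistrationProtoOrBuilder view = req.getRegistrationOrBuilder();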
public interface RegisterDatanodeResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .DatanodeRegistrationProto registration = 1;
boolean hasRegistration();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder();
}
public static final class RegisterDatanodeResponseProto extends
com.google.protobuf.GeneratedMessage
implements RegisterDatanodeResponseProtoOrBuilder {
// Use RegisterDatanodeResponseProto.newBuilder() to construct.
private RegisterDatanodeResponseProto(Builder builder) {
super(builder);
}
private RegisterDatanodeResponseProto(boolean noInit) {}
private static final RegisterDatanodeResponseProto defaultInstance;
public static RegisterDatanodeResponseProto getDefaultInstance() {
return defaultInstance;
}
public RegisterDatanodeResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterDatanodeResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterDatanodeResponseProto_fieldAccessorTable;
}
private int bitField0_;
// required .DatanodeRegistrationProto registration = 1;
public static final int REGISTRATION_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_;
public boolean hasRegistration() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
return registration_;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
return registration_;
}
private void initFields() {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasRegistration()) {
memoizedIsInitialized = 0;
return false;
}
if (!getRegistration().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, registration_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, registration_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto) obj;
boolean result = true;
result = result && (hasRegistration() == other.hasRegistration());
if (hasRegistration()) {
result = result && getRegistration()
.equals(other.getRegistration());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasRegistration()) {
hash = (37 * hash) + REGISTRATION_FIELD_NUMBER;
hash = (53 * hash) + getRegistration().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterDatanodeResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterDatanodeResponseProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getRegistrationFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (registrationBuilder_ == null) {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
} else {
registrationBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (registrationBuilder_ == null) {
result.registration_ = registration_;
} else {
result.registration_ = registrationBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance()) return this;
if (other.hasRegistration()) {
mergeRegistration(other.getRegistration());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasRegistration()) {
return false;
}
if (!getRegistration().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder();
if (hasRegistration()) {
subBuilder.mergeFrom(getRegistration());
}
input.readMessage(subBuilder, extensionRegistry);
setRegistration(subBuilder.buildPartial());
break;
}
}
}
}
private int bitField0_;
// required .DatanodeRegistrationProto registration = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registrationBuilder_;
public boolean hasRegistration() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
if (registrationBuilder_ == null) {
return registration_;
} else {
return registrationBuilder_.getMessage();
}
}
public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
if (registrationBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
registration_ = value;
onChanged();
} else {
registrationBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder setRegistration(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) {
if (registrationBuilder_ == null) {
registration_ = builderForValue.build();
onChanged();
} else {
registrationBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
if (registrationBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
registration_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) {
registration_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial();
} else {
registration_ = value;
}
onChanged();
} else {
registrationBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder clearRegistration() {
if (registrationBuilder_ == null) {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
onChanged();
} else {
registrationBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistrationBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getRegistrationFieldBuilder().getBuilder();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
if (registrationBuilder_ != null) {
return registrationBuilder_.getMessageOrBuilder();
} else {
return registration_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>
getRegistrationFieldBuilder() {
if (registrationBuilder_ == null) {
registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>(
registration_,
getParentForChildren(),
isClean());
registration_ = null;
}
return registrationBuilder_;
}
// @@protoc_insertion_point(builder_scope:RegisterDatanodeResponseProto)
}
static {
defaultInstance = new RegisterDatanodeResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:RegisterDatanodeResponseProto)
}
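// The parseDelimitedFrom() overloads above read a varint length prefix
// before the message body, which lets several messages share one stream
// and return null at end of stream. A matching writer sketch (hypothetical
// caller code; writeDelimitedTo is the standard MessageLite counterpart):
//
//   response.writeDelimitedTo(out);   // writes a length-prefixed frame
//   RegisterDatanodeResponseProto next =
//       RegisterDatanodeResponseProto.parseDelimitedFrom(in);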
public interface HeartbeatRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .DatanodeRegistrationProto registration = 1;
boolean hasRegistration();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder();
// repeated .StorageReportProto reports = 2;
java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto>
getReportsList();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto getReports(int index);
int getReportsCount();
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProtoOrBuilder>
getReportsOrBuilderList();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProtoOrBuilder getReportsOrBuilder(
int index);
// optional uint32 xmitsInProgress = 3 [default = 0];
boolean hasXmitsInProgress();
int getXmitsInProgress();
// optional uint32 xceiverCount = 4 [default = 0];
boolean hasXceiverCount();
int getXceiverCount();
// optional uint32 failedVolumes = 5 [default = 0];
boolean hasFailedVolumes();
int getFailedVolumes();
}
public static final class HeartbeatRequestProto extends
com.google.protobuf.GeneratedMessage
implements HeartbeatRequestProtoOrBuilder {
// Use HeartbeatRequestProto.newBuilder() to construct.
private HeartbeatRequestProto(Builder builder) {
super(builder);
}
private HeartbeatRequestProto(boolean noInit) {}
private static final HeartbeatRequestProto defaultInstance;
public static HeartbeatRequestProto getDefaultInstance() {
return defaultInstance;
}
public HeartbeatRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_HeartbeatRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_HeartbeatRequestProto_fieldAccessorTable;
}
private int bitField0_;
// required .DatanodeRegistrationProto registration = 1;
public static final int REGISTRATION_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_;
public boolean hasRegistration() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
return registration_;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
return registration_;
}
// repeated .StorageReportProto reports = 2;
public static final int REPORTS_FIELD_NUMBER = 2;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto> reports_;
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto> getReportsList() {
return reports_;
}
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProtoOrBuilder>
getReportsOrBuilderList() {
return reports_;
}
public int getReportsCount() {
return reports_.size();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto getReports(int index) {
return reports_.get(index);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProtoOrBuilder getReportsOrBuilder(
int index) {
return reports_.get(index);
}
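// Repeated-field access sketch for the reports list above (hypothetical
// caller code; on a built message the list is unmodifiable):
//
//   for (int i = 0; i < heartbeat.getReportsCount(); i++) {
//     StorageReportProto report = heartbeat.getReports(i);
//     // inspect per-storage state here
//   }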
// optional uint32 xmitsInProgress = 3 [default = 0];
public static final int XMITSINPROGRESS_FIELD_NUMBER = 3;
private int xmitsInProgress_;
public boolean hasXmitsInProgress() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public int getXmitsInProgress() {
return xmitsInProgress_;
}
// optional uint32 xceiverCount = 4 [default = 0];
public static final int XCEIVERCOUNT_FIELD_NUMBER = 4;
private int xceiverCount_;
public boolean hasXceiverCount() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
public int getXceiverCount() {
return xceiverCount_;
}
// optional uint32 failedVolumes = 5 [default = 0];
public static final int FAILEDVOLUMES_FIELD_NUMBER = 5;
private int failedVolumes_;
public boolean hasFailedVolumes() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
public int getFailedVolumes() {
return failedVolumes_;
}
private void initFields() {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
reports_ = java.util.Collections.emptyList();
xmitsInProgress_ = 0;
xceiverCount_ = 0;
failedVolumes_ = 0;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasRegistration()) {
memoizedIsInitialized = 0;
return false;
}
if (!getRegistration().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getReportsCount(); i++) {
if (!getReports(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, registration_);
}
for (int i = 0; i < reports_.size(); i++) {
output.writeMessage(2, reports_.get(i));
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt32(3, xmitsInProgress_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt32(4, xceiverCount_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt32(5, failedVolumes_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, registration_);
}
for (int i = 0; i < reports_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, reports_.get(i));
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(3, xmitsInProgress_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(4, xceiverCount_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(5, failedVolumes_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
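// getSerializedSize() memoizes its result (safe because built messages are
// immutable), so writeTo() above calls it first to populate the cache.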
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto) obj;
boolean result = true;
result = result && (hasRegistration() == other.hasRegistration());
if (hasRegistration()) {
result = result && getRegistration()
.equals(other.getRegistration());
}
result = result && getReportsList()
.equals(other.getReportsList());
result = result && (hasXmitsInProgress() == other.hasXmitsInProgress());
if (hasXmitsInProgress()) {
result = result && (getXmitsInProgress()
== other.getXmitsInProgress());
}
result = result && (hasXceiverCount() == other.hasXceiverCount());
if (hasXceiverCount()) {
result = result && (getXceiverCount()
== other.getXceiverCount());
}
result = result && (hasFailedVolumes() == other.hasFailedVolumes());
if (hasFailedVolumes()) {
result = result && (getFailedVolumes()
== other.getFailedVolumes());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasRegistration()) {
hash = (37 * hash) + REGISTRATION_FIELD_NUMBER;
hash = (53 * hash) + getRegistration().hashCode();
}
if (getReportsCount() > 0) {
hash = (37 * hash) + REPORTS_FIELD_NUMBER;
hash = (53 * hash) + getReportsList().hashCode();
}
if (hasXmitsInProgress()) {
hash = (37 * hash) + XMITSINPROGRESS_FIELD_NUMBER;
hash = (53 * hash) + getXmitsInProgress();
}
if (hasXceiverCount()) {
hash = (37 * hash) + XCEIVERCOUNT_FIELD_NUMBER;
hash = (53 * hash) + getXceiverCount();
}
if (hasFailedVolumes()) {
hash = (37 * hash) + FAILEDVOLUMES_FIELD_NUMBER;
hash = (53 * hash) + getFailedVolumes();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_HeartbeatRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_HeartbeatRequestProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getRegistrationFieldBuilder();
getReportsFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (registrationBuilder_ == null) {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
} else {
registrationBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
if (reportsBuilder_ == null) {
reports_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
} else {
reportsBuilder_.clear();
}
xmitsInProgress_ = 0;
bitField0_ = (bitField0_ & ~0x00000004);
xceiverCount_ = 0;
bitField0_ = (bitField0_ & ~0x00000008);
failedVolumes_ = 0;
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (registrationBuilder_ == null) {
result.registration_ = registration_;
} else {
result.registration_ = registrationBuilder_.build();
}
if (reportsBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002)) {
reports_ = java.util.Collections.unmodifiableList(reports_);
bitField0_ = (bitField0_ & ~0x00000002);
}
result.reports_ = reports_;
} else {
result.reports_ = reportsBuilder_.build();
}
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000002;
}
result.xmitsInProgress_ = xmitsInProgress_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000004;
}
result.xceiverCount_ = xceiverCount_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000008;
}
result.failedVolumes_ = failedVolumes_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.getDefaultInstance()) return this;
if (other.hasRegistration()) {
mergeRegistration(other.getRegistration());
}
if (reportsBuilder_ == null) {
if (!other.reports_.isEmpty()) {
if (reports_.isEmpty()) {
reports_ = other.reports_;
bitField0_ = (bitField0_ & ~0x00000002);
} else {
ensureReportsIsMutable();
reports_.addAll(other.reports_);
}
onChanged();
}
} else {
if (!other.reports_.isEmpty()) {
if (reportsBuilder_.isEmpty()) {
reportsBuilder_.dispose();
reportsBuilder_ = null;
reports_ = other.reports_;
bitField0_ = (bitField0_ & ~0x00000002);
reportsBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getReportsFieldBuilder() : null;
} else {
reportsBuilder_.addAllMessages(other.reports_);
}
}
}
if (other.hasXmitsInProgress()) {
setXmitsInProgress(other.getXmitsInProgress());
}
if (other.hasXceiverCount()) {
setXceiverCount(other.getXceiverCount());
}
if (other.hasFailedVolumes()) {
setFailedVolumes(other.getFailedVolumes());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasRegistration()) {
return false;
}
if (!getRegistration().isInitialized()) {
return false;
}
for (int i = 0; i < getReportsCount(); i++) {
if (!getReports(i).isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder();
if (hasRegistration()) {
subBuilder.mergeFrom(getRegistration());
}
input.readMessage(subBuilder, extensionRegistry);
setRegistration(subBuilder.buildPartial());
break;
}
case 18: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto.newBuilder();
input.readMessage(subBuilder, extensionRegistry);
addReports(subBuilder.buildPartial());
break;
}
case 24: {
bitField0_ |= 0x00000004;
xmitsInProgress_ = input.readUInt32();
break;
}
case 32: {
bitField0_ |= 0x00000008;
xceiverCount_ = input.readUInt32();
break;
}
case 40: {
bitField0_ |= 0x00000010;
failedVolumes_ = input.readUInt32();
break;
}
}
}
}
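// Note on the case labels above (an explanatory aside, not generated output): each
// label is a protobuf wire tag, computed as (fieldNumber << 3) | wireType. A minimal
// sketch of the arithmetic behind this message's fields:
//
//   static int tagFor(int fieldNumber, int wireType) {
//     return (fieldNumber << 3) | wireType;
//   }
//   tagFor(1, 2); // 10 -> registration (length-delimited message)
//   tagFor(2, 2); // 18 -> reports (length-delimited message)
//   tagFor(3, 0); // 24 -> xmitsInProgress (varint)
//   tagFor(4, 0); // 32 -> xceiverCount (varint)
//   tagFor(5, 0); // 40 -> failedVolumes (varint)
//
// Tag 0 is never a valid field tag, which is why case 0 marks end of input.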
private int bitField0_;
// required .DatanodeRegistrationProto registration = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registrationBuilder_;
public boolean hasRegistration() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
if (registrationBuilder_ == null) {
return registration_;
} else {
return registrationBuilder_.getMessage();
}
}
public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
if (registrationBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
registration_ = value;
onChanged();
} else {
registrationBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder setRegistration(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) {
if (registrationBuilder_ == null) {
registration_ = builderForValue.build();
onChanged();
} else {
registrationBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
if (registrationBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
registration_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) {
registration_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial();
} else {
registration_ = value;
}
onChanged();
} else {
registrationBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder clearRegistration() {
if (registrationBuilder_ == null) {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
onChanged();
} else {
registrationBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistrationBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getRegistrationFieldBuilder().getBuilder();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
if (registrationBuilder_ != null) {
return registrationBuilder_.getMessageOrBuilder();
} else {
return registration_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>
getRegistrationFieldBuilder() {
if (registrationBuilder_ == null) {
registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>(
registration_,
getParentForChildren(),
isClean());
registration_ = null;
}
return registrationBuilder_;
}
// repeated .StorageReportProto reports = 2;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto> reports_ =
java.util.Collections.emptyList();
private void ensureReportsIsMutable() {
if (!((bitField0_ & 0x00000002) == 0x00000002)) {
reports_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto>(reports_);
bitField0_ |= 0x00000002;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProtoOrBuilder> reportsBuilder_;
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto> getReportsList() {
if (reportsBuilder_ == null) {
return java.util.Collections.unmodifiableList(reports_);
} else {
return reportsBuilder_.getMessageList();
}
}
public int getReportsCount() {
if (reportsBuilder_ == null) {
return reports_.size();
} else {
return reportsBuilder_.getCount();
}
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto getReports(int index) {
if (reportsBuilder_ == null) {
return reports_.get(index);
} else {
return reportsBuilder_.getMessage(index);
}
}
public Builder setReports(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto value) {
if (reportsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureReportsIsMutable();
reports_.set(index, value);
onChanged();
} else {
reportsBuilder_.setMessage(index, value);
}
return this;
}
public Builder setReports(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto.Builder builderForValue) {
if (reportsBuilder_ == null) {
ensureReportsIsMutable();
reports_.set(index, builderForValue.build());
onChanged();
} else {
reportsBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
public Builder addReports(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto value) {
if (reportsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureReportsIsMutable();
reports_.add(value);
onChanged();
} else {
reportsBuilder_.addMessage(value);
}
return this;
}
public Builder addReports(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto value) {
if (reportsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureReportsIsMutable();
reports_.add(index, value);
onChanged();
} else {
reportsBuilder_.addMessage(index, value);
}
return this;
}
public Builder addReports(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto.Builder builderForValue) {
if (reportsBuilder_ == null) {
ensureReportsIsMutable();
reports_.add(builderForValue.build());
onChanged();
} else {
reportsBuilder_.addMessage(builderForValue.build());
}
return this;
}
public Builder addReports(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto.Builder builderForValue) {
if (reportsBuilder_ == null) {
ensureReportsIsMutable();
reports_.add(index, builderForValue.build());
onChanged();
} else {
reportsBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
public Builder addAllReports(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto> values) {
if (reportsBuilder_ == null) {
ensureReportsIsMutable();
super.addAll(values, reports_);
onChanged();
} else {
reportsBuilder_.addAllMessages(values);
}
return this;
}
public Builder clearReports() {
if (reportsBuilder_ == null) {
reports_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
} else {
reportsBuilder_.clear();
}
return this;
}
public Builder removeReports(int index) {
if (reportsBuilder_ == null) {
ensureReportsIsMutable();
reports_.remove(index);
onChanged();
} else {
reportsBuilder_.remove(index);
}
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto.Builder getReportsBuilder(
int index) {
return getReportsFieldBuilder().getBuilder(index);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProtoOrBuilder getReportsOrBuilder(
int index) {
if (reportsBuilder_ == null) {
return reports_.get(index);
} else {
return reportsBuilder_.getMessageOrBuilder(index);
}
}
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProtoOrBuilder>
getReportsOrBuilderList() {
if (reportsBuilder_ != null) {
return reportsBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(reports_);
}
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto.Builder addReportsBuilder() {
return getReportsFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto.getDefaultInstance());
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto.Builder addReportsBuilder(
int index) {
return getReportsFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto.getDefaultInstance());
}
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto.Builder>
getReportsBuilderList() {
return getReportsFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProtoOrBuilder>
getReportsFieldBuilder() {
if (reportsBuilder_ == null) {
reportsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProtoOrBuilder>(
reports_,
((bitField0_ & 0x00000002) == 0x00000002),
getParentForChildren(),
isClean());
reports_ = null;
}
return reportsBuilder_;
}
// optional uint32 xmitsInProgress = 3 [default = 0];
private int xmitsInProgress_ ;
public boolean hasXmitsInProgress() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
public int getXmitsInProgress() {
return xmitsInProgress_;
}
public Builder setXmitsInProgress(int value) {
bitField0_ |= 0x00000004;
xmitsInProgress_ = value;
onChanged();
return this;
}
public Builder clearXmitsInProgress() {
bitField0_ = (bitField0_ & ~0x00000004);
xmitsInProgress_ = 0;
onChanged();
return this;
}
// optional uint32 xceiverCount = 4 [default = 0];
private int xceiverCount_ ;
public boolean hasXceiverCount() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
public int getXceiverCount() {
return xceiverCount_;
}
public Builder setXceiverCount(int value) {
bitField0_ |= 0x00000008;
xceiverCount_ = value;
onChanged();
return this;
}
public Builder clearXceiverCount() {
bitField0_ = (bitField0_ & ~0x00000008);
xceiverCount_ = 0;
onChanged();
return this;
}
// optional uint32 failedVolumes = 5 [default = 0];
private int failedVolumes_ ;
public boolean hasFailedVolumes() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
public int getFailedVolumes() {
return failedVolumes_;
}
public Builder setFailedVolumes(int value) {
bitField0_ |= 0x00000010;
failedVolumes_ = value;
onChanged();
return this;
}
public Builder clearFailedVolumes() {
bitField0_ = (bitField0_ & ~0x00000010);
failedVolumes_ = 0;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:HeartbeatRequestProto)
}
static {
defaultInstance = new HeartbeatRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:HeartbeatRequestProto)
}
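// Usage sketch (not part of the generated file): a HeartbeatRequestProto is assembled
// through the builder accessors defined above. The registration sub-message setters
// (setDatanodeID, setStorageInfo, setKeys, setSoftwareVersion) are assumed to follow
// the standard generated-builder pattern for the required fields of
// DatanodeRegistrationProto; all concrete values below are hypothetical placeholders.
//
//   HeartbeatRequestProto request = HeartbeatRequestProto.newBuilder()
//       .setRegistration(DatanodeRegistrationProto.newBuilder()
//           .setDatanodeID(datanodeId)      // assumed pre-built DatanodeIDProto
//           .setStorageInfo(storageInfo)    // assumed pre-built StorageInfoProto
//           .setKeys(blockKeys)             // assumed pre-built ExportedBlockKeysProto
//           .setSoftwareVersion("2.0.0")    // hypothetical version string
//           .build())
//       .addReports(StorageReportProto.newBuilder()
//           .setStorageID("DS-1")           // hypothetical storage ID
//           .setCapacity(1L << 30)
//           .setDfsUsed(1L << 29)
//           .setRemaining(1L << 29)
//           .build())
//       .setXmitsInProgress(0)
//       .setXceiverCount(2)
//       .setFailedVolumes(0)
//       .build(); // build() throws if the required registration field is unset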
public interface StorageReportProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required string storageID = 1;
boolean hasStorageID();
String getStorageID();
// optional bool failed = 2 [default = false];
boolean hasFailed();
boolean getFailed();
// optional uint64 capacity = 3 [default = 0];
boolean hasCapacity();
long getCapacity();
// optional uint64 dfsUsed = 4 [default = 0];
boolean hasDfsUsed();
long getDfsUsed();
// optional uint64 remaining = 5 [default = 0];
boolean hasRemaining();
long getRemaining();
// optional uint64 blockPoolUsed = 6 [default = 0];
boolean hasBlockPoolUsed();
long getBlockPoolUsed();
}
public static final class StorageReportProto extends
com.google.protobuf.GeneratedMessage
implements StorageReportProtoOrBuilder {
// Use StorageReportProto.newBuilder() to construct.
private StorageReportProto(Builder builder) {
super(builder);
}
private StorageReportProto(boolean noInit) {}
private static final StorageReportProto defaultInstance;
public static StorageReportProto getDefaultInstance() {
return defaultInstance;
}
public StorageReportProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_StorageReportProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_StorageReportProto_fieldAccessorTable;
}
private int bitField0_;
// required string storageID = 1;
public static final int STORAGEID_FIELD_NUMBER = 1;
private java.lang.Object storageID_;
public boolean hasStorageID() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public String getStorageID() {
java.lang.Object ref = storageID_;
if (ref instanceof String) {
return (String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
String s = bs.toStringUtf8();
if (com.google.protobuf.Internal.isValidUtf8(bs)) {
storageID_ = s;
}
return s;
}
}
private com.google.protobuf.ByteString getStorageIDBytes() {
java.lang.Object ref = storageID_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8((String) ref);
storageID_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// optional bool failed = 2 [default = false];
public static final int FAILED_FIELD_NUMBER = 2;
private boolean failed_;
public boolean hasFailed() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public boolean getFailed() {
return failed_;
}
// optional uint64 capacity = 3 [default = 0];
public static final int CAPACITY_FIELD_NUMBER = 3;
private long capacity_;
public boolean hasCapacity() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
public long getCapacity() {
return capacity_;
}
// optional uint64 dfsUsed = 4 [default = 0];
public static final int DFSUSED_FIELD_NUMBER = 4;
private long dfsUsed_;
public boolean hasDfsUsed() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
public long getDfsUsed() {
return dfsUsed_;
}
// optional uint64 remaining = 5 [default = 0];
public static final int REMAINING_FIELD_NUMBER = 5;
private long remaining_;
public boolean hasRemaining() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
public long getRemaining() {
return remaining_;
}
// optional uint64 blockPoolUsed = 6 [default = 0];
public static final int BLOCKPOOLUSED_FIELD_NUMBER = 6;
private long blockPoolUsed_;
public boolean hasBlockPoolUsed() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
public long getBlockPoolUsed() {
return blockPoolUsed_;
}
private void initFields() {
storageID_ = "";
failed_ = false;
capacity_ = 0L;
dfsUsed_ = 0L;
remaining_ = 0L;
blockPoolUsed_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasStorageID()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getStorageIDBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBool(2, failed_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt64(3, capacity_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt64(4, dfsUsed_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeUInt64(5, remaining_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeUInt64(6, blockPoolUsed_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getStorageIDBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(2, failed_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, capacity_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(4, dfsUsed_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(5, remaining_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(6, blockPoolUsed_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto) obj;
boolean result = true;
result = result && (hasStorageID() == other.hasStorageID());
if (hasStorageID()) {
result = result && getStorageID()
.equals(other.getStorageID());
}
result = result && (hasFailed() == other.hasFailed());
if (hasFailed()) {
result = result && (getFailed()
== other.getFailed());
}
result = result && (hasCapacity() == other.hasCapacity());
if (hasCapacity()) {
result = result && (getCapacity()
== other.getCapacity());
}
result = result && (hasDfsUsed() == other.hasDfsUsed());
if (hasDfsUsed()) {
result = result && (getDfsUsed()
== other.getDfsUsed());
}
result = result && (hasRemaining() == other.hasRemaining());
if (hasRemaining()) {
result = result && (getRemaining()
== other.getRemaining());
}
result = result && (hasBlockPoolUsed() == other.hasBlockPoolUsed());
if (hasBlockPoolUsed()) {
result = result && (getBlockPoolUsed()
== other.getBlockPoolUsed());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasStorageID()) {
hash = (37 * hash) + STORAGEID_FIELD_NUMBER;
hash = (53 * hash) + getStorageID().hashCode();
}
if (hasFailed()) {
hash = (37 * hash) + FAILED_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getFailed());
}
if (hasCapacity()) {
hash = (37 * hash) + CAPACITY_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getCapacity());
}
if (hasDfsUsed()) {
hash = (37 * hash) + DFSUSED_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getDfsUsed());
}
if (hasRemaining()) {
hash = (37 * hash) + REMAINING_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getRemaining());
}
if (hasBlockPoolUsed()) {
hash = (37 * hash) + BLOCKPOOLUSED_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getBlockPoolUsed());
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_StorageReportProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_StorageReportProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
storageID_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
failed_ = false;
bitField0_ = (bitField0_ & ~0x00000002);
capacity_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
dfsUsed_ = 0L;
bitField0_ = (bitField0_ & ~0x00000008);
remaining_ = 0L;
bitField0_ = (bitField0_ & ~0x00000010);
blockPoolUsed_ = 0L;
bitField0_ = (bitField0_ & ~0x00000020);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.storageID_ = storageID_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.failed_ = failed_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.capacity_ = capacity_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.dfsUsed_ = dfsUsed_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.remaining_ = remaining_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000020;
}
result.blockPoolUsed_ = blockPoolUsed_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto.getDefaultInstance()) return this;
if (other.hasStorageID()) {
setStorageID(other.getStorageID());
}
if (other.hasFailed()) {
setFailed(other.getFailed());
}
if (other.hasCapacity()) {
setCapacity(other.getCapacity());
}
if (other.hasDfsUsed()) {
setDfsUsed(other.getDfsUsed());
}
if (other.hasRemaining()) {
setRemaining(other.getRemaining());
}
if (other.hasBlockPoolUsed()) {
setBlockPoolUsed(other.getBlockPoolUsed());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasStorageID()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
storageID_ = input.readBytes();
break;
}
case 16: {
bitField0_ |= 0x00000002;
failed_ = input.readBool();
break;
}
case 24: {
bitField0_ |= 0x00000004;
capacity_ = input.readUInt64();
break;
}
case 32: {
bitField0_ |= 0x00000008;
dfsUsed_ = input.readUInt64();
break;
}
case 40: {
bitField0_ |= 0x00000010;
remaining_ = input.readUInt64();
break;
}
case 48: {
bitField0_ |= 0x00000020;
blockPoolUsed_ = input.readUInt64();
break;
}
}
}
}
private int bitField0_;
// required string storageID = 1;
private java.lang.Object storageID_ = "";
public boolean hasStorageID() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public String getStorageID() {
java.lang.Object ref = storageID_;
if (!(ref instanceof String)) {
String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
storageID_ = s;
return s;
} else {
return (String) ref;
}
}
public Builder setStorageID(String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
storageID_ = value;
onChanged();
return this;
}
public Builder clearStorageID() {
bitField0_ = (bitField0_ & ~0x00000001);
storageID_ = getDefaultInstance().getStorageID();
onChanged();
return this;
}
void setStorageID(com.google.protobuf.ByteString value) {
bitField0_ |= 0x00000001;
storageID_ = value;
onChanged();
}
// optional bool failed = 2 [default = false];
private boolean failed_ ;
public boolean hasFailed() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public boolean getFailed() {
return failed_;
}
public Builder setFailed(boolean value) {
bitField0_ |= 0x00000002;
failed_ = value;
onChanged();
return this;
}
public Builder clearFailed() {
bitField0_ = (bitField0_ & ~0x00000002);
failed_ = false;
onChanged();
return this;
}
// optional uint64 capacity = 3 [default = 0];
private long capacity_ ;
public boolean hasCapacity() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
public long getCapacity() {
return capacity_;
}
public Builder setCapacity(long value) {
bitField0_ |= 0x00000004;
capacity_ = value;
onChanged();
return this;
}
public Builder clearCapacity() {
bitField0_ = (bitField0_ & ~0x00000004);
capacity_ = 0L;
onChanged();
return this;
}
// optional uint64 dfsUsed = 4 [default = 0];
private long dfsUsed_ ;
public boolean hasDfsUsed() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
public long getDfsUsed() {
return dfsUsed_;
}
public Builder setDfsUsed(long value) {
bitField0_ |= 0x00000008;
dfsUsed_ = value;
onChanged();
return this;
}
public Builder clearDfsUsed() {
bitField0_ = (bitField0_ & ~0x00000008);
dfsUsed_ = 0L;
onChanged();
return this;
}
// optional uint64 remaining = 5 [default = 0];
private long remaining_ ;
public boolean hasRemaining() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
public long getRemaining() {
return remaining_;
}
public Builder setRemaining(long value) {
bitField0_ |= 0x00000010;
remaining_ = value;
onChanged();
return this;
}
public Builder clearRemaining() {
bitField0_ = (bitField0_ & ~0x00000010);
remaining_ = 0L;
onChanged();
return this;
}
// optional uint64 blockPoolUsed = 6 [default = 0];
private long blockPoolUsed_ ;
public boolean hasBlockPoolUsed() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
public long getBlockPoolUsed() {
return blockPoolUsed_;
}
public Builder setBlockPoolUsed(long value) {
bitField0_ |= 0x00000020;
blockPoolUsed_ = value;
onChanged();
return this;
}
public Builder clearBlockPoolUsed() {
bitField0_ = (bitField0_ & ~0x00000020);
blockPoolUsed_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:StorageReportProto)
}
static {
defaultInstance = new StorageReportProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:StorageReportProto)
}
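// Usage sketch (not part of the generated file): StorageReportProto round-trips through
// the parseFrom overloads defined above; toByteArray() is inherited from the protobuf
// message base class. Values are hypothetical.
//
//   StorageReportProto report = StorageReportProto.newBuilder()
//       .setStorageID("DS-volume-1") // storageID is the only required field
//       .setFailed(false)
//       .setCapacity(100L)
//       .build();
//   byte[] wire = report.toByteArray();
//   StorageReportProto parsed = StorageReportProto.parseFrom(wire);
//   assert parsed.getStorageID().equals("DS-volume-1");
//   assert !parsed.hasDfsUsed(); // unset optional fields report absent and return defaults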
public interface NNHAStatusHeartbeatProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .NNHAStatusHeartbeatProto.State state = 1;
boolean hasState();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.State getState();
// required uint64 txid = 2;
boolean hasTxid();
long getTxid();
}
public static final class NNHAStatusHeartbeatProto extends
com.google.protobuf.GeneratedMessage
implements NNHAStatusHeartbeatProtoOrBuilder {
// Use NNHAStatusHeartbeatProto.newBuilder() to construct.
private NNHAStatusHeartbeatProto(Builder builder) {
super(builder);
}
private NNHAStatusHeartbeatProto(boolean noInit) {}
private static final NNHAStatusHeartbeatProto defaultInstance;
public static NNHAStatusHeartbeatProto getDefaultInstance() {
return defaultInstance;
}
public NNHAStatusHeartbeatProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_NNHAStatusHeartbeatProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_NNHAStatusHeartbeatProto_fieldAccessorTable;
}
public enum State
implements com.google.protobuf.ProtocolMessageEnum {
ACTIVE(0, 0),
STANDBY(1, 1),
;
public static final int ACTIVE_VALUE = 0;
public static final int STANDBY_VALUE = 1;
public final int getNumber() { return value; }
public static State valueOf(int value) {
switch (value) {
case 0: return ACTIVE;
case 1: return STANDBY;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<State>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<State>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<State>() {
public State findValueByNumber(int number) {
return State.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.getDescriptor().getEnumTypes().get(0);
}
private static final State[] VALUES = {
ACTIVE, STANDBY,
};
public static State valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private State(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:NNHAStatusHeartbeatProto.State)
}
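// Explanatory aside (not generated output): valueOf(int) resolves by proto enum number
// and returns null for an unrecognized number; the Builder's mergeFrom parser later in
// this class relies on that null to shunt unrecognized values into unknownFields. For
// example, State.valueOf(1) is STANDBY, while State.valueOf(2) is null.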
private int bitField0_;
// required .NNHAStatusHeartbeatProto.State state = 1;
public static final int STATE_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.State state_;
public boolean hasState() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.State getState() {
return state_;
}
// required uint64 txid = 2;
public static final int TXID_FIELD_NUMBER = 2;
private long txid_;
public boolean hasTxid() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public long getTxid() {
return txid_;
}
private void initFields() {
state_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.State.ACTIVE;
txid_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasState()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasTxid()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, state_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, txid_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, state_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, txid_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto) obj;
boolean result = true;
result = result && (hasState() == other.hasState());
if (hasState()) {
result = result &&
(getState() == other.getState());
}
result = result && (hasTxid() == other.hasTxid());
if (hasTxid()) {
result = result && (getTxid()
== other.getTxid());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasState()) {
hash = (37 * hash) + STATE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getState());
}
if (hasTxid()) {
hash = (37 * hash) + TXID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getTxid());
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_NNHAStatusHeartbeatProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_NNHAStatusHeartbeatProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
state_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.State.ACTIVE;
bitField0_ = (bitField0_ & ~0x00000001);
txid_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.state_ = state_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.txid_ = txid_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.getDefaultInstance()) return this;
if (other.hasState()) {
setState(other.getState());
}
if (other.hasTxid()) {
setTxid(other.getTxid());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasState()) {
return false;
}
if (!hasTxid()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 8: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.State value = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.State.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
state_ = value;
}
break;
}
case 16: {
bitField0_ |= 0x00000002;
txid_ = input.readUInt64();
break;
}
}
}
}
private int bitField0_;
// required .NNHAStatusHeartbeatProto.State state = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.State state_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.State.ACTIVE;
public boolean hasState() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.State getState() {
return state_;
}
public Builder setState(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.State value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
state_ = value;
onChanged();
return this;
}
public Builder clearState() {
bitField0_ = (bitField0_ & ~0x00000001);
state_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.State.ACTIVE;
onChanged();
return this;
}
// required uint64 txid = 2;
private long txid_ ;
public boolean hasTxid() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public long getTxid() {
return txid_;
}
public Builder setTxid(long value) {
bitField0_ |= 0x00000002;
txid_ = value;
onChanged();
return this;
}
public Builder clearTxid() {
bitField0_ = (bitField0_ & ~0x00000002);
txid_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:NNHAStatusHeartbeatProto)
}
static {
defaultInstance = new NNHAStatusHeartbeatProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:NNHAStatusHeartbeatProto)
}
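// Usage sketch (not part of the generated file): both NNHAStatusHeartbeatProto fields
// are required, so build() succeeds only once state and txid are set. The transaction
// id below is a hypothetical value.
//
//   NNHAStatusHeartbeatProto haStatus = NNHAStatusHeartbeatProto.newBuilder()
//       .setState(NNHAStatusHeartbeatProto.State.ACTIVE)
//       .setTxid(42L) // hypothetical transaction id
//       .build();
//
//   NNHAStatusHeartbeatProto.Builder partial = NNHAStatusHeartbeatProto.newBuilder()
//       .setState(NNHAStatusHeartbeatProto.State.STANDBY);
//   partial.isInitialized(); // false: txid is still unset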
public interface HeartbeatResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// repeated .DatanodeCommandProto cmds = 1;
java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto>
getCmdsList();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmds(int index);
int getCmdsCount();
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder>
getCmdsOrBuilderList();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdsOrBuilder(
int index);
// required .NNHAStatusHeartbeatProto haStatus = 2;
boolean hasHaStatus();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto getHaStatus();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProtoOrBuilder getHaStatusOrBuilder();
}
public static final class HeartbeatResponseProto extends
com.google.protobuf.GeneratedMessage
implements HeartbeatResponseProtoOrBuilder {
// Use HeartbeatResponseProto.newBuilder() to construct.
private HeartbeatResponseProto(Builder builder) {
super(builder);
}
private HeartbeatResponseProto(boolean noInit) {}
private static final HeartbeatResponseProto defaultInstance;
public static HeartbeatResponseProto getDefaultInstance() {
return defaultInstance;
}
public HeartbeatResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_HeartbeatResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_HeartbeatResponseProto_fieldAccessorTable;
}
private int bitField0_;
// repeated .DatanodeCommandProto cmds = 1;
public static final int CMDS_FIELD_NUMBER = 1;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto> cmds_;
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto> getCmdsList() {
return cmds_;
}
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder>
getCmdsOrBuilderList() {
return cmds_;
}
public int getCmdsCount() {
return cmds_.size();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmds(int index) {
return cmds_.get(index);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdsOrBuilder(
int index) {
return cmds_.get(index);
}
// required .NNHAStatusHeartbeatProto haStatus = 2;
public static final int HASTATUS_FIELD_NUMBER = 2;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto haStatus_;
public boolean hasHaStatus() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto getHaStatus() {
return haStatus_;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProtoOrBuilder getHaStatusOrBuilder() {
return haStatus_;
}
private void initFields() {
cmds_ = java.util.Collections.emptyList();
haStatus_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasHaStatus()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getCmdsCount(); i++) {
if (!getCmds(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (!getHaStatus().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < cmds_.size(); i++) {
output.writeMessage(1, cmds_.get(i));
}
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(2, haStatus_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
for (int i = 0; i < cmds_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, cmds_.get(i));
}
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, haStatus_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto) obj;
boolean result = true;
result = result && getCmdsList()
.equals(other.getCmdsList());
result = result && (hasHaStatus() == other.hasHaStatus());
if (hasHaStatus()) {
result = result && getHaStatus()
.equals(other.getHaStatus());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (getCmdsCount() > 0) {
hash = (37 * hash) + CMDS_FIELD_NUMBER;
hash = (53 * hash) + getCmdsList().hashCode();
}
if (hasHaStatus()) {
hash = (37 * hash) + HASTATUS_FIELD_NUMBER;
hash = (53 * hash) + getHaStatus().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_HeartbeatResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_HeartbeatResponseProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getCmdsFieldBuilder();
getHaStatusFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (cmdsBuilder_ == null) {
cmds_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
} else {
cmdsBuilder_.clear();
}
if (haStatusBuilder_ == null) {
haStatus_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.getDefaultInstance();
} else {
haStatusBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (cmdsBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001)) {
cmds_ = java.util.Collections.unmodifiableList(cmds_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.cmds_ = cmds_;
} else {
result.cmds_ = cmdsBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000001;
}
if (haStatusBuilder_ == null) {
result.haStatus_ = haStatus_;
} else {
result.haStatus_ = haStatusBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance()) return this;
if (cmdsBuilder_ == null) {
if (!other.cmds_.isEmpty()) {
if (cmds_.isEmpty()) {
cmds_ = other.cmds_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureCmdsIsMutable();
cmds_.addAll(other.cmds_);
}
onChanged();
}
} else {
if (!other.cmds_.isEmpty()) {
if (cmdsBuilder_.isEmpty()) {
cmdsBuilder_.dispose();
cmdsBuilder_ = null;
cmds_ = other.cmds_;
bitField0_ = (bitField0_ & ~0x00000001);
cmdsBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getCmdsFieldBuilder() : null;
} else {
cmdsBuilder_.addAllMessages(other.cmds_);
}
}
}
if (other.hasHaStatus()) {
mergeHaStatus(other.getHaStatus());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasHaStatus()) {
return false;
}
for (int i = 0; i < getCmdsCount(); i++) {
if (!getCmds(i).isInitialized()) {
return false;
}
}
if (!getHaStatus().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.newBuilder();
input.readMessage(subBuilder, extensionRegistry);
addCmds(subBuilder.buildPartial());
break;
}
case 18: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.newBuilder();
if (hasHaStatus()) {
subBuilder.mergeFrom(getHaStatus());
}
input.readMessage(subBuilder, extensionRegistry);
setHaStatus(subBuilder.buildPartial());
break;
}
}
}
}
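// A note on the tag dispatch above: a protobuf field key is the varint
// (fieldNumber << 3) | wireType, with wire type 2 for length-delimited data.
// So cmds (field 1, a message) arrives as (1 << 3) | 2 = 10 and haStatus
// (field 2, a message) as (2 << 3) | 2 = 18, matching case 10 and case 18;
// tag 0 is never a valid key, so case 0 marks the end of the input.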
private int bitField0_;
// repeated .DatanodeCommandProto cmds = 1;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto> cmds_ =
java.util.Collections.emptyList();
private void ensureCmdsIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
cmds_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto>(cmds_);
bitField0_ |= 0x00000001;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder> cmdsBuilder_;
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto> getCmdsList() {
if (cmdsBuilder_ == null) {
return java.util.Collections.unmodifiableList(cmds_);
} else {
return cmdsBuilder_.getMessageList();
}
}
public int getCmdsCount() {
if (cmdsBuilder_ == null) {
return cmds_.size();
} else {
return cmdsBuilder_.getCount();
}
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmds(int index) {
if (cmdsBuilder_ == null) {
return cmds_.get(index);
} else {
return cmdsBuilder_.getMessage(index);
}
}
public Builder setCmds(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) {
if (cmdsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureCmdsIsMutable();
cmds_.set(index, value);
onChanged();
} else {
cmdsBuilder_.setMessage(index, value);
}
return this;
}
public Builder setCmds(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder builderForValue) {
if (cmdsBuilder_ == null) {
ensureCmdsIsMutable();
cmds_.set(index, builderForValue.build());
onChanged();
} else {
cmdsBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
public Builder addCmds(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) {
if (cmdsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureCmdsIsMutable();
cmds_.add(value);
onChanged();
} else {
cmdsBuilder_.addMessage(value);
}
return this;
}
public Builder addCmds(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) {
if (cmdsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureCmdsIsMutable();
cmds_.add(index, value);
onChanged();
} else {
cmdsBuilder_.addMessage(index, value);
}
return this;
}
public Builder addCmds(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder builderForValue) {
if (cmdsBuilder_ == null) {
ensureCmdsIsMutable();
cmds_.add(builderForValue.build());
onChanged();
} else {
cmdsBuilder_.addMessage(builderForValue.build());
}
return this;
}
public Builder addCmds(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder builderForValue) {
if (cmdsBuilder_ == null) {
ensureCmdsIsMutable();
cmds_.add(index, builderForValue.build());
onChanged();
} else {
cmdsBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
public Builder addAllCmds(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto> values) {
if (cmdsBuilder_ == null) {
ensureCmdsIsMutable();
super.addAll(values, cmds_);
onChanged();
} else {
cmdsBuilder_.addAllMessages(values);
}
return this;
}
public Builder clearCmds() {
if (cmdsBuilder_ == null) {
cmds_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
} else {
cmdsBuilder_.clear();
}
return this;
}
public Builder removeCmds(int index) {
if (cmdsBuilder_ == null) {
ensureCmdsIsMutable();
cmds_.remove(index);
onChanged();
} else {
cmdsBuilder_.remove(index);
}
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder getCmdsBuilder(
int index) {
return getCmdsFieldBuilder().getBuilder(index);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdsOrBuilder(
int index) {
if (cmdsBuilder_ == null) {
return cmds_.get(index); } else {
return cmdsBuilder_.getMessageOrBuilder(index);
}
}
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder>
getCmdsOrBuilderList() {
if (cmdsBuilder_ != null) {
return cmdsBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(cmds_);
}
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder addCmdsBuilder() {
return getCmdsFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance());
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder addCmdsBuilder(
int index) {
return getCmdsFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance());
}
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder>
getCmdsBuilderList() {
return getCmdsFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder>
getCmdsFieldBuilder() {
if (cmdsBuilder_ == null) {
cmdsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder>(
cmds_,
((bitField0_ & 0x00000001) == 0x00000001),
getParentForChildren(),
isClean());
cmds_ = null;
}
return cmdsBuilder_;
}
// required .NNHAStatusHeartbeatProto haStatus = 2;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto haStatus_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProtoOrBuilder> haStatusBuilder_;
public boolean hasHaStatus() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto getHaStatus() {
if (haStatusBuilder_ == null) {
return haStatus_;
} else {
return haStatusBuilder_.getMessage();
}
}
public Builder setHaStatus(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto value) {
if (haStatusBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
haStatus_ = value;
onChanged();
} else {
haStatusBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
return this;
}
public Builder setHaStatus(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.Builder builderForValue) {
if (haStatusBuilder_ == null) {
haStatus_ = builderForValue.build();
onChanged();
} else {
haStatusBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
return this;
}
public Builder mergeHaStatus(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto value) {
if (haStatusBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002) &&
haStatus_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.getDefaultInstance()) {
haStatus_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.newBuilder(haStatus_).mergeFrom(value).buildPartial();
} else {
haStatus_ = value;
}
onChanged();
} else {
haStatusBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
return this;
}
public Builder clearHaStatus() {
if (haStatusBuilder_ == null) {
haStatus_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.getDefaultInstance();
onChanged();
} else {
haStatusBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.Builder getHaStatusBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getHaStatusFieldBuilder().getBuilder();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProtoOrBuilder getHaStatusOrBuilder() {
if (haStatusBuilder_ != null) {
return haStatusBuilder_.getMessageOrBuilder();
} else {
return haStatus_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProtoOrBuilder>
getHaStatusFieldBuilder() {
if (haStatusBuilder_ == null) {
haStatusBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProtoOrBuilder>(
haStatus_,
getParentForChildren(),
isClean());
haStatus_ = null;
}
return haStatusBuilder_;
}
// @@protoc_insertion_point(builder_scope:HeartbeatResponseProto)
}
static {
defaultInstance = new HeartbeatResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:HeartbeatResponseProto)
}
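// Usage sketch (illustrative only; `haStatus` and `cmd` stand for fully
// initialized NNHAStatusHeartbeatProto / DatanodeCommandProto values and are
// not part of this file). build() enforces the required haStatus field,
// whereas buildPartial() would hand back the incomplete message:
//
//   HeartbeatResponseProto resp = HeartbeatResponseProto.newBuilder()
//       .setHaStatus(haStatus)        // required field 2
//       .addCmds(cmd)                 // repeated field 1, may be empty
//       .build();                     // throws UninitializedMessageException
//                                     // if haStatus were never set
//   byte[] wire = resp.toByteArray(); // inherited serialization helper
//   HeartbeatResponseProto again = HeartbeatResponseProto.parseFrom(wire);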
public interface BlockReportRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .DatanodeRegistrationProto registration = 1;
boolean hasRegistration();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder();
// required string blockPoolId = 2;
boolean hasBlockPoolId();
String getBlockPoolId();
// repeated .StorageBlockReportProto reports = 3;
java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto>
getReportsList();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto getReports(int index);
int getReportsCount();
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder>
getReportsOrBuilderList();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder getReportsOrBuilder(
int index);
}
public static final class BlockReportRequestProto extends
com.google.protobuf.GeneratedMessage
implements BlockReportRequestProtoOrBuilder {
// Use BlockReportRequestProto.newBuilder() to construct.
private BlockReportRequestProto(Builder builder) {
super(builder);
}
private BlockReportRequestProto(boolean noInit) {}
private static final BlockReportRequestProto defaultInstance;
public static BlockReportRequestProto getDefaultInstance() {
return defaultInstance;
}
public BlockReportRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReportRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReportRequestProto_fieldAccessorTable;
}
private int bitField0_;
// required .DatanodeRegistrationProto registration = 1;
public static final int REGISTRATION_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_;
public boolean hasRegistration() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
return registration_;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
return registration_;
}
// required string blockPoolId = 2;
public static final int BLOCKPOOLID_FIELD_NUMBER = 2;
private java.lang.Object blockPoolId_;
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof String) {
return (String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
String s = bs.toStringUtf8();
if (com.google.protobuf.Internal.isValidUtf8(bs)) {
blockPoolId_ = s;
}
return s;
}
}
private com.google.protobuf.ByteString getBlockPoolIdBytes() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8((String) ref);
blockPoolId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
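// The two accessors above implement the generated lazy-conversion pattern for
// string fields: blockPoolId_ holds either a String or a ByteString.
// getBlockPoolId() decodes UTF-8 once and caches the String (only when the
// bytes are valid UTF-8), while getBlockPoolIdBytes() caches the encoded
// ByteString so that repeated serialization avoids re-encoding.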
// repeated .StorageBlockReportProto reports = 3;
public static final int REPORTS_FIELD_NUMBER = 3;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto> reports_;
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto> getReportsList() {
return reports_;
}
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder>
getReportsOrBuilderList() {
return reports_;
}
public int getReportsCount() {
return reports_.size();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto getReports(int index) {
return reports_.get(index);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder getReportsOrBuilder(
int index) {
return reports_.get(index);
}
private void initFields() {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
blockPoolId_ = "";
reports_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasRegistration()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasBlockPoolId()) {
memoizedIsInitialized = 0;
return false;
}
if (!getRegistration().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getReportsCount(); i++) {
if (!getReports(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, registration_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getBlockPoolIdBytes());
}
for (int i = 0; i < reports_.size(); i++) {
output.writeMessage(3, reports_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, registration_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getBlockPoolIdBytes());
}
for (int i = 0; i < reports_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, reports_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto) obj;
boolean result = true;
result = result && (hasRegistration() == other.hasRegistration());
if (hasRegistration()) {
result = result && getRegistration()
.equals(other.getRegistration());
}
result = result && (hasBlockPoolId() == other.hasBlockPoolId());
if (hasBlockPoolId()) {
result = result && getBlockPoolId()
.equals(other.getBlockPoolId());
}
result = result && getReportsList()
.equals(other.getReportsList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasRegistration()) {
hash = (37 * hash) + REGISTRATION_FIELD_NUMBER;
hash = (53 * hash) + getRegistration().hashCode();
}
if (hasBlockPoolId()) {
hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
hash = (53 * hash) + getBlockPoolId().hashCode();
}
if (getReportsCount() > 0) {
hash = (37 * hash) + REPORTS_FIELD_NUMBER;
hash = (53 * hash) + getReportsList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReportRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReportRequestProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getRegistrationFieldBuilder();
getReportsFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (registrationBuilder_ == null) {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
} else {
registrationBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
blockPoolId_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
if (reportsBuilder_ == null) {
reports_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
} else {
reportsBuilder_.clear();
}
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (registrationBuilder_ == null) {
result.registration_ = registration_;
} else {
result.registration_ = registrationBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.blockPoolId_ = blockPoolId_;
if (reportsBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004)) {
reports_ = java.util.Collections.unmodifiableList(reports_);
bitField0_ = (bitField0_ & ~0x00000004);
}
result.reports_ = reports_;
} else {
result.reports_ = reportsBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.getDefaultInstance()) return this;
if (other.hasRegistration()) {
mergeRegistration(other.getRegistration());
}
if (other.hasBlockPoolId()) {
setBlockPoolId(other.getBlockPoolId());
}
if (reportsBuilder_ == null) {
if (!other.reports_.isEmpty()) {
if (reports_.isEmpty()) {
reports_ = other.reports_;
bitField0_ = (bitField0_ & ~0x00000004);
} else {
ensureReportsIsMutable();
reports_.addAll(other.reports_);
}
onChanged();
}
} else {
if (!other.reports_.isEmpty()) {
if (reportsBuilder_.isEmpty()) {
reportsBuilder_.dispose();
reportsBuilder_ = null;
reports_ = other.reports_;
bitField0_ = (bitField0_ & ~0x00000004);
reportsBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getReportsFieldBuilder() : null;
} else {
reportsBuilder_.addAllMessages(other.reports_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasRegistration()) {
return false;
}
if (!hasBlockPoolId()) {
return false;
}
if (!getRegistration().isInitialized()) {
return false;
}
for (int i = 0; i < getReportsCount(); i++) {
if (!getReports(i).isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder();
if (hasRegistration()) {
subBuilder.mergeFrom(getRegistration());
}
input.readMessage(subBuilder, extensionRegistry);
setRegistration(subBuilder.buildPartial());
break;
}
case 18: {
bitField0_ |= 0x00000002;
blockPoolId_ = input.readBytes();
break;
}
case 26: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.newBuilder();
input.readMessage(subBuilder, extensionRegistry);
addReports(subBuilder.buildPartial());
break;
}
}
}
}
private int bitField0_;
// required .DatanodeRegistrationProto registration = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registrationBuilder_;
public boolean hasRegistration() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
if (registrationBuilder_ == null) {
return registration_;
} else {
return registrationBuilder_.getMessage();
}
}
public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
if (registrationBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
registration_ = value;
onChanged();
} else {
registrationBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder setRegistration(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) {
if (registrationBuilder_ == null) {
registration_ = builderForValue.build();
onChanged();
} else {
registrationBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
if (registrationBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
registration_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) {
registration_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial();
} else {
registration_ = value;
}
onChanged();
} else {
registrationBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder clearRegistration() {
if (registrationBuilder_ == null) {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
onChanged();
} else {
registrationBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistrationBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getRegistrationFieldBuilder().getBuilder();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
if (registrationBuilder_ != null) {
return registrationBuilder_.getMessageOrBuilder();
} else {
return registration_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>
getRegistrationFieldBuilder() {
if (registrationBuilder_ == null) {
registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>(
registration_,
getParentForChildren(),
isClean());
registration_ = null;
}
return registrationBuilder_;
}
// required string blockPoolId = 2;
private java.lang.Object blockPoolId_ = "";
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (!(ref instanceof String)) {
String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
blockPoolId_ = s;
return s;
} else {
return (String) ref;
}
}
public Builder setBlockPoolId(String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
blockPoolId_ = value;
onChanged();
return this;
}
public Builder clearBlockPoolId() {
bitField0_ = (bitField0_ & ~0x00000002);
blockPoolId_ = getDefaultInstance().getBlockPoolId();
onChanged();
return this;
}
void setBlockPoolId(com.google.protobuf.ByteString value) {
bitField0_ |= 0x00000002;
blockPoolId_ = value;
onChanged();
}
// repeated .StorageBlockReportProto reports = 3;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto> reports_ =
java.util.Collections.emptyList();
private void ensureReportsIsMutable() {
if (!((bitField0_ & 0x00000004) == 0x00000004)) {
reports_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto>(reports_);
bitField0_ |= 0x00000004;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder> reportsBuilder_;
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto> getReportsList() {
if (reportsBuilder_ == null) {
return java.util.Collections.unmodifiableList(reports_);
} else {
return reportsBuilder_.getMessageList();
}
}
public int getReportsCount() {
if (reportsBuilder_ == null) {
return reports_.size();
} else {
return reportsBuilder_.getCount();
}
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto getReports(int index) {
if (reportsBuilder_ == null) {
return reports_.get(index);
} else {
return reportsBuilder_.getMessage(index);
}
}
public Builder setReports(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto value) {
if (reportsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureReportsIsMutable();
reports_.set(index, value);
onChanged();
} else {
reportsBuilder_.setMessage(index, value);
}
return this;
}
public Builder setReports(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder builderForValue) {
if (reportsBuilder_ == null) {
ensureReportsIsMutable();
reports_.set(index, builderForValue.build());
onChanged();
} else {
reportsBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
public Builder addReports(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto value) {
if (reportsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureReportsIsMutable();
reports_.add(value);
onChanged();
} else {
reportsBuilder_.addMessage(value);
}
return this;
}
public Builder addReports(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto value) {
if (reportsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureReportsIsMutable();
reports_.add(index, value);
onChanged();
} else {
reportsBuilder_.addMessage(index, value);
}
return this;
}
public Builder addReports(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder builderForValue) {
if (reportsBuilder_ == null) {
ensureReportsIsMutable();
reports_.add(builderForValue.build());
onChanged();
} else {
reportsBuilder_.addMessage(builderForValue.build());
}
return this;
}
public Builder addReports(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder builderForValue) {
if (reportsBuilder_ == null) {
ensureReportsIsMutable();
reports_.add(index, builderForValue.build());
onChanged();
} else {
reportsBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
public Builder addAllReports(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto> values) {
if (reportsBuilder_ == null) {
ensureReportsIsMutable();
super.addAll(values, reports_);
onChanged();
} else {
reportsBuilder_.addAllMessages(values);
}
return this;
}
public Builder clearReports() {
if (reportsBuilder_ == null) {
reports_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
} else {
reportsBuilder_.clear();
}
return this;
}
public Builder removeReports(int index) {
if (reportsBuilder_ == null) {
ensureReportsIsMutable();
reports_.remove(index);
onChanged();
} else {
reportsBuilder_.remove(index);
}
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder getReportsBuilder(
int index) {
return getReportsFieldBuilder().getBuilder(index);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder getReportsOrBuilder(
int index) {
if (reportsBuilder_ == null) {
return reports_.get(index); } else {
return reportsBuilder_.getMessageOrBuilder(index);
}
}
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder>
getReportsOrBuilderList() {
if (reportsBuilder_ != null) {
return reportsBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(reports_);
}
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder addReportsBuilder() {
return getReportsFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.getDefaultInstance());
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder addReportsBuilder(
int index) {
return getReportsFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.getDefaultInstance());
}
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder>
getReportsBuilderList() {
return getReportsFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder>
getReportsFieldBuilder() {
if (reportsBuilder_ == null) {
reportsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder>(
reports_,
((bitField0_ & 0x00000004) == 0x00000004),
getParentForChildren(),
isClean());
reports_ = null;
}
return reportsBuilder_;
}
// @@protoc_insertion_point(builder_scope:BlockReportRequestProto)
}
static {
defaultInstance = new BlockReportRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:BlockReportRequestProto)
}
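// Usage sketch (illustrative only; `registration`, `report`, `out`, and `in`
// stand for assumed values/streams, not part of this file). The delimited
// form length-prefixes each message, so several requests can share a stream:
//
//   BlockReportRequestProto req = BlockReportRequestProto.newBuilder()
//       .setRegistration(registration)          // required field 1
//       .setBlockPoolId("BP-1-127.0.0.1-1")     // required field 2
//       .addReports(report)                     // repeated field 3
//       .build();
//   req.writeDelimitedTo(out);                  // out: java.io.OutputStream
//   BlockReportRequestProto back =
//       BlockReportRequestProto.parseDelimitedFrom(in);  // null at EOF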
public interface StorageBlockReportProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .DatanodeStorageProto storage = 1;
boolean hasStorage();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto getStorage();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder();
// repeated uint64 blocks = 2 [packed = true];
java.util.List<java.lang.Long> getBlocksList();
int getBlocksCount();
long getBlocks(int index);
}
public static final class StorageBlockReportProto extends
com.google.protobuf.GeneratedMessage
implements StorageBlockReportProtoOrBuilder {
// Use StorageBlockReportProto.newBuilder() to construct.
private StorageBlockReportProto(Builder builder) {
super(builder);
}
private StorageBlockReportProto(boolean noInit) {}
private static final StorageBlockReportProto defaultInstance;
public static StorageBlockReportProto getDefaultInstance() {
return defaultInstance;
}
public StorageBlockReportProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_StorageBlockReportProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_StorageBlockReportProto_fieldAccessorTable;
}
private int bitField0_;
// required .DatanodeStorageProto storage = 1;
public static final int STORAGE_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto storage_;
public boolean hasStorage() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto getStorage() {
return storage_;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder() {
return storage_;
}
// repeated uint64 blocks = 2 [packed = true];
public static final int BLOCKS_FIELD_NUMBER = 2;
private java.util.List<java.lang.Long> blocks_;
public java.util.List<java.lang.Long>
getBlocksList() {
return blocks_;
}
public int getBlocksCount() {
return blocks_.size();
}
public long getBlocks(int index) {
return blocks_.get(index);
}
private int blocksMemoizedSerializedSize = -1;
private void initFields() {
storage_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.getDefaultInstance();
blocks_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasStorage()) {
memoizedIsInitialized = 0;
return false;
}
if (!getStorage().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, storage_);
}
if (getBlocksList().size() > 0) {
output.writeRawVarint32(18);
output.writeRawVarint32(blocksMemoizedSerializedSize);
}
for (int i = 0; i < blocks_.size(); i++) {
output.writeUInt64NoTag(blocks_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, storage_);
}
{
int dataSize = 0;
for (int i = 0; i < blocks_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeUInt64SizeNoTag(blocks_.get(i));
}
size += dataSize;
if (!getBlocksList().isEmpty()) {
size += 1;
size += com.google.protobuf.CodedOutputStream
.computeInt32SizeNoTag(dataSize);
}
blocksMemoizedSerializedSize = dataSize;
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
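// Worked sizing example for the packed blocks field above (values chosen for
// illustration): for blocks = [1, 2, 300], the varint payloads cost 1 + 1 + 2
// bytes, so dataSize = 4; the field then adds 1 byte for the key (tag 18,
// written by writeTo) plus computeInt32SizeNoTag(4) = 1 byte for the length
// prefix, 6 bytes in all. The unpacked encoding would repeat a 1-byte key per
// element and cost 7 bytes here, a gap that widens with the element count.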
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto) obj;
boolean result = true;
result = result && (hasStorage() == other.hasStorage());
if (hasStorage()) {
result = result && getStorage()
.equals(other.getStorage());
}
result = result && getBlocksList()
.equals(other.getBlocksList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasStorage()) {
hash = (37 * hash) + STORAGE_FIELD_NUMBER;
hash = (53 * hash) + getStorage().hashCode();
}
if (getBlocksCount() > 0) {
hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
hash = (53 * hash) + getBlocksList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
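// Editorial note: the parseDelimitedFrom variants expect a varint length
// prefix in front of the message bytes (the framing produced by
// writeDelimitedTo) and return null on a clean end-of-stream instead of
// throwing, which is why they branch on mergeDelimitedFrom's boolean result.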
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_StorageBlockReportProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_StorageBlockReportProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getStorageFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (storageBuilder_ == null) {
storage_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.getDefaultInstance();
} else {
storageBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (storageBuilder_ == null) {
result.storage_ = storage_;
} else {
result.storage_ = storageBuilder_.build();
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
blocks_ = java.util.Collections.unmodifiableList(blocks_);
bitField0_ = (bitField0_ & ~0x00000002);
}
result.blocks_ = blocks_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.getDefaultInstance()) return this;
if (other.hasStorage()) {
mergeStorage(other.getStorage());
}
if (!other.blocks_.isEmpty()) {
if (blocks_.isEmpty()) {
blocks_ = other.blocks_;
bitField0_ = (bitField0_ & ~0x00000002);
} else {
ensureBlocksIsMutable();
blocks_.addAll(other.blocks_);
}
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasStorage()) {
return false;
}
if (!getStorage().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.newBuilder();
if (hasStorage()) {
subBuilder.mergeFrom(getStorage());
}
input.readMessage(subBuilder, extensionRegistry);
setStorage(subBuilder.buildPartial());
break;
}
case 16: {
ensureBlocksIsMutable();
blocks_.add(input.readUInt64());
break;
}
case 18: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
while (input.getBytesUntilLimit() > 0) {
addBlocks(input.readUInt64());
}
input.popLimit(limit);
break;
}
}
}
}
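// Editorial note: the switch above accepts field 2 ("blocks") in both wire
// encodings -- tag 16 ((2 << 3) | 0) for individual unpacked varints and
// tag 18 ((2 << 3) | 2) for a packed length-delimited run -- so the parser
// stays compatible with writers that ignore [packed = true].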
private int bitField0_;
// required .DatanodeStorageProto storage = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto storage_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProtoOrBuilder> storageBuilder_;
public boolean hasStorage() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto getStorage() {
if (storageBuilder_ == null) {
return storage_;
} else {
return storageBuilder_.getMessage();
}
}
public Builder setStorage(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto value) {
if (storageBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
storage_ = value;
onChanged();
} else {
storageBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder setStorage(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.Builder builderForValue) {
if (storageBuilder_ == null) {
storage_ = builderForValue.build();
onChanged();
} else {
storageBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
public Builder mergeStorage(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto value) {
if (storageBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
storage_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.getDefaultInstance()) {
storage_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.newBuilder(storage_).mergeFrom(value).buildPartial();
} else {
storage_ = value;
}
onChanged();
} else {
storageBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder clearStorage() {
if (storageBuilder_ == null) {
storage_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.getDefaultInstance();
onChanged();
} else {
storageBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.Builder getStorageBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getStorageFieldBuilder().getBuilder();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder() {
if (storageBuilder_ != null) {
return storageBuilder_.getMessageOrBuilder();
} else {
return storage_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProtoOrBuilder>
getStorageFieldBuilder() {
if (storageBuilder_ == null) {
storageBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProtoOrBuilder>(
storage_,
getParentForChildren(),
isClean());
storage_ = null;
}
return storageBuilder_;
}
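// Editorial note: storageBuilder_ is created lazily. Until the first call to
// getStorageBuilder(), the value lives directly in storage_; once the
// SingleFieldBuilder exists it takes ownership (storage_ is set to null) and
// all subsequent reads and writes are routed through it.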
// repeated uint64 blocks = 2 [packed = true];
private java.util.List<java.lang.Long> blocks_ = java.util.Collections.emptyList();
private void ensureBlocksIsMutable() {
if (!((bitField0_ & 0x00000002) == 0x00000002)) {
blocks_ = new java.util.ArrayList<java.lang.Long>(blocks_);
bitField0_ |= 0x00000002;
}
}
public java.util.List<java.lang.Long>
getBlocksList() {
return java.util.Collections.unmodifiableList(blocks_);
}
public int getBlocksCount() {
return blocks_.size();
}
public long getBlocks(int index) {
return blocks_.get(index);
}
public Builder setBlocks(
int index, long value) {
ensureBlocksIsMutable();
blocks_.set(index, value);
onChanged();
return this;
}
public Builder addBlocks(long value) {
ensureBlocksIsMutable();
blocks_.add(value);
onChanged();
return this;
}
public Builder addAllBlocks(
java.lang.Iterable<? extends java.lang.Long> values) {
ensureBlocksIsMutable();
super.addAll(values, blocks_);
onChanged();
return this;
}
public Builder clearBlocks() {
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:StorageBlockReportProto)
}
static {
defaultInstance = new StorageBlockReportProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:StorageBlockReportProto)
}
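// Usage sketch (illustrative only; `storage` is an assumed, already-built
// DatanodeStorageProto):
//
//   StorageBlockReportProto report = StorageBlockReportProto.newBuilder()
//       .setStorage(storage)   // required: build() throws if unset
//       .addBlocks(1L)
//       .addBlocks(300L)       // repeated uint64, serialized packed
//       .build();
//   byte[] bytes = report.toByteArray();
//   StorageBlockReportProto parsed = StorageBlockReportProto.parseFrom(bytes);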
public interface BlockReportResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// optional .DatanodeCommandProto cmd = 1;
boolean hasCmd();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmd();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdOrBuilder();
}
public static final class BlockReportResponseProto extends
com.google.protobuf.GeneratedMessage
implements BlockReportResponseProtoOrBuilder {
// Use BlockReportResponseProto.newBuilder() to construct.
private BlockReportResponseProto(Builder builder) {
super(builder);
}
private BlockReportResponseProto(boolean noInit) {}
private static final BlockReportResponseProto defaultInstance;
public static BlockReportResponseProto getDefaultInstance() {
return defaultInstance;
}
public BlockReportResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReportResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReportResponseProto_fieldAccessorTable;
}
private int bitField0_;
// optional .DatanodeCommandProto cmd = 1;
public static final int CMD_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto cmd_;
public boolean hasCmd() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmd() {
return cmd_;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdOrBuilder() {
return cmd_;
}
private void initFields() {
cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (hasCmd()) {
if (!getCmd().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, cmd_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, cmd_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto) obj;
boolean result = true;
result = result && (hasCmd() == other.hasCmd());
if (hasCmd()) {
result = result && getCmd()
.equals(other.getCmd());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasCmd()) {
hash = (37 * hash) + CMD_FIELD_NUMBER;
hash = (53 * hash) + getCmd().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReportResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReportResponseProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getCmdFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (cmdBuilder_ == null) {
cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance();
} else {
cmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (cmdBuilder_ == null) {
result.cmd_ = cmd_;
} else {
result.cmd_ = cmdBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance()) return this;
if (other.hasCmd()) {
mergeCmd(other.getCmd());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (hasCmd()) {
if (!getCmd().isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.newBuilder();
if (hasCmd()) {
subBuilder.mergeFrom(getCmd());
}
input.readMessage(subBuilder, extensionRegistry);
setCmd(subBuilder.buildPartial());
break;
}
}
}
}
private int bitField0_;
// optional .DatanodeCommandProto cmd = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder> cmdBuilder_;
public boolean hasCmd() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmd() {
if (cmdBuilder_ == null) {
return cmd_;
} else {
return cmdBuilder_.getMessage();
}
}
public Builder setCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) {
if (cmdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
cmd_ = value;
onChanged();
} else {
cmdBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder setCmd(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder builderForValue) {
if (cmdBuilder_ == null) {
cmd_ = builderForValue.build();
onChanged();
} else {
cmdBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
public Builder mergeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) {
if (cmdBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
cmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance()) {
cmd_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.newBuilder(cmd_).mergeFrom(value).buildPartial();
} else {
cmd_ = value;
}
onChanged();
} else {
cmdBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder clearCmd() {
if (cmdBuilder_ == null) {
cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance();
onChanged();
} else {
cmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder getCmdBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getCmdFieldBuilder().getBuilder();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdOrBuilder() {
if (cmdBuilder_ != null) {
return cmdBuilder_.getMessageOrBuilder();
} else {
return cmd_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder>
getCmdFieldBuilder() {
if (cmdBuilder_ == null) {
cmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder>(
cmd_,
getParentForChildren(),
isClean());
cmd_ = null;
}
return cmdBuilder_;
}
// @@protoc_insertion_point(builder_scope:BlockReportResponseProto)
}
static {
defaultInstance = new BlockReportResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:BlockReportResponseProto)
}
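// Usage sketch (illustrative only): since `cmd` is optional, callers should
// gate on hasCmd(); getCmd() on an absent field returns
// DatanodeCommandProto.getDefaultInstance(), never null.
//
//   BlockReportResponseProto resp = BlockReportResponseProto.parseFrom(bytes);
//   if (resp.hasCmd()) {
//     DatanodeCommandProto cmd = resp.getCmd();
//     // dispatch on cmd here
//   }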
public interface ReceivedDeletedBlockInfoProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .BlockProto block = 1;
boolean hasBlock();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder();
// required .ReceivedDeletedBlockInfoProto.BlockStatus status = 3;
boolean hasStatus();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus getStatus();
// optional string deleteHint = 2;
boolean hasDeleteHint();
String getDeleteHint();
}
public static final class ReceivedDeletedBlockInfoProto extends
com.google.protobuf.GeneratedMessage
implements ReceivedDeletedBlockInfoProtoOrBuilder {
// Use ReceivedDeletedBlockInfoProto.newBuilder() to construct.
private ReceivedDeletedBlockInfoProto(Builder builder) {
super(builder);
}
private ReceivedDeletedBlockInfoProto(boolean noInit) {}
private static final ReceivedDeletedBlockInfoProto defaultInstance;
public static ReceivedDeletedBlockInfoProto getDefaultInstance() {
return defaultInstance;
}
public ReceivedDeletedBlockInfoProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReceivedDeletedBlockInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReceivedDeletedBlockInfoProto_fieldAccessorTable;
}
public enum BlockStatus
implements com.google.protobuf.ProtocolMessageEnum {
RECEIVING(0, 1),
RECEIVED(1, 2),
DELETED(2, 3),
;
public static final int RECEIVING_VALUE = 1;
public static final int RECEIVED_VALUE = 2;
public static final int DELETED_VALUE = 3;
public final int getNumber() { return value; }
public static BlockStatus valueOf(int value) {
switch (value) {
case 1: return RECEIVING;
case 2: return RECEIVED;
case 3: return DELETED;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<BlockStatus>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<BlockStatus>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<BlockStatus>() {
public BlockStatus findValueByNumber(int number) {
return BlockStatus.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.getDescriptor().getEnumTypes().get(0);
}
private static final BlockStatus[] VALUES = {
RECEIVING, RECEIVED, DELETED,
};
public static BlockStatus valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private BlockStatus(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:ReceivedDeletedBlockInfoProto.BlockStatus)
}
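// Editorial note: each BlockStatus constant carries two numbers -- its
// descriptor index (0-based declaration order) and its wire value. Hence
// BlockStatus.valueOf(2) returns RECEIVED (wire value 2, index 1), and an
// unrecognized wire value returns null, which the builder's mergeFrom below
// shunts into the unknown-field set.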
private int bitField0_;
// required .BlockProto block = 1;
public static final int BLOCK_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto block_;
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock() {
return block_;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder() {
return block_;
}
// required .ReceivedDeletedBlockInfoProto.BlockStatus status = 3;
public static final int STATUS_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus status_;
public boolean hasStatus() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus getStatus() {
return status_;
}
// optional string deleteHint = 2;
public static final int DELETEHINT_FIELD_NUMBER = 2;
private java.lang.Object deleteHint_;
public boolean hasDeleteHint() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
public String getDeleteHint() {
java.lang.Object ref = deleteHint_;
if (ref instanceof String) {
return (String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
String s = bs.toStringUtf8();
if (com.google.protobuf.Internal.isValidUtf8(bs)) {
deleteHint_ = s;
}
return s;
}
}
private com.google.protobuf.ByteString getDeleteHintBytes() {
java.lang.Object ref = deleteHint_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8((String) ref);
deleteHint_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
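// Editorial note: deleteHint_ holds either a String or a ByteString and is
// converted lazily in both directions; the decoded String is cached back
// only when the bytes are valid UTF-8, so a malformed payload is re-decoded
// on every getDeleteHint() call.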
private void initFields() {
block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
status_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus.RECEIVING;
deleteHint_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasBlock()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasStatus()) {
memoizedIsInitialized = 0;
return false;
}
if (!getBlock().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, block_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(2, getDeleteHintBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeEnum(3, status_.getNumber());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, block_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getDeleteHintBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(3, status_.getNumber());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto) obj;
boolean result = true;
result = result && (hasBlock() == other.hasBlock());
if (hasBlock()) {
result = result && getBlock()
.equals(other.getBlock());
}
result = result && (hasStatus() == other.hasStatus());
if (hasStatus()) {
result = result &&
(getStatus() == other.getStatus());
}
result = result && (hasDeleteHint() == other.hasDeleteHint());
if (hasDeleteHint()) {
result = result && getDeleteHint()
.equals(other.getDeleteHint());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasBlock()) {
hash = (37 * hash) + BLOCK_FIELD_NUMBER;
hash = (53 * hash) + getBlock().hashCode();
}
if (hasStatus()) {
hash = (37 * hash) + STATUS_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getStatus());
}
if (hasDeleteHint()) {
hash = (37 * hash) + DELETEHINT_FIELD_NUMBER;
hash = (53 * hash) + getDeleteHint().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReceivedDeletedBlockInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReceivedDeletedBlockInfoProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getBlockFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (blockBuilder_ == null) {
block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
status_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus.RECEIVING;
bitField0_ = (bitField0_ & ~0x00000002);
deleteHint_ = "";
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (blockBuilder_ == null) {
result.block_ = block_;
} else {
result.block_ = blockBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.status_ = status_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.deleteHint_ = deleteHint_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.getDefaultInstance()) return this;
if (other.hasBlock()) {
mergeBlock(other.getBlock());
}
if (other.hasStatus()) {
setStatus(other.getStatus());
}
if (other.hasDeleteHint()) {
setDeleteHint(other.getDeleteHint());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasBlock()) {
return false;
}
if (!hasStatus()) {
return false;
}
if (!getBlock().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder();
if (hasBlock()) {
subBuilder.mergeFrom(getBlock());
}
input.readMessage(subBuilder, extensionRegistry);
setBlock(subBuilder.buildPartial());
break;
}
case 18: {
bitField0_ |= 0x00000004;
deleteHint_ = input.readBytes();
break;
}
case 24: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus value = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(3, rawValue);
} else {
bitField0_ |= 0x00000002;
status_ = value;
}
break;
}
}
}
}
private int bitField0_;
// required .BlockProto block = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> blockBuilder_;
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock() {
if (blockBuilder_ == null) {
return block_;
} else {
return blockBuilder_.getMessage();
}
}
public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
if (blockBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
block_ = value;
onChanged();
} else {
blockBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder setBlock(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
if (blockBuilder_ == null) {
block_ = builderForValue.build();
onChanged();
} else {
blockBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
if (blockBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) {
block_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
} else {
block_ = value;
}
onChanged();
} else {
blockBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder clearBlock() {
if (blockBuilder_ == null) {
block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
onChanged();
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getBlockBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getBlockFieldBuilder().getBuilder();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder() {
if (blockBuilder_ != null) {
return blockBuilder_.getMessageOrBuilder();
} else {
return block_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>
getBlockFieldBuilder() {
if (blockBuilder_ == null) {
blockBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>(
block_,
getParentForChildren(),
isClean());
block_ = null;
}
return blockBuilder_;
}
// required .ReceivedDeletedBlockInfoProto.BlockStatus status = 3;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus status_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus.RECEIVING;
public boolean hasStatus() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus getStatus() {
return status_;
}
public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
status_ = value;
onChanged();
return this;
}
public Builder clearStatus() {
bitField0_ = (bitField0_ & ~0x00000002);
status_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus.RECEIVING;
onChanged();
return this;
}
// optional string deleteHint = 2;
private java.lang.Object deleteHint_ = "";
public boolean hasDeleteHint() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
public String getDeleteHint() {
java.lang.Object ref = deleteHint_;
if (!(ref instanceof String)) {
String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
deleteHint_ = s;
return s;
} else {
return (String) ref;
}
}
public Builder setDeleteHint(String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
deleteHint_ = value;
onChanged();
return this;
}
public Builder clearDeleteHint() {
bitField0_ = (bitField0_ & ~0x00000004);
deleteHint_ = getDefaultInstance().getDeleteHint();
onChanged();
return this;
}
void setDeleteHint(com.google.protobuf.ByteString value) {
bitField0_ |= 0x00000004;
deleteHint_ = value;
onChanged();
}
// @@protoc_insertion_point(builder_scope:ReceivedDeletedBlockInfoProto)
}
static {
defaultInstance = new ReceivedDeletedBlockInfoProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:ReceivedDeletedBlockInfoProto)
}
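// Usage sketch (illustrative only; `blockProto` is an assumed, already-built
// HdfsProtos.BlockProto). Both `block` and `status` are required, so build()
// throws an UninitializedMessageException if either is unset:
//
//   ReceivedDeletedBlockInfoProto info = ReceivedDeletedBlockInfoProto.newBuilder()
//       .setBlock(blockProto)
//       .setStatus(ReceivedDeletedBlockInfoProto.BlockStatus.RECEIVED)
//       .setDeleteHint("")   // optional string, field 2
//       .build();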
public interface StorageReceivedDeletedBlocksProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required string storageID = 1;
boolean hasStorageID();
String getStorageID();
// repeated .ReceivedDeletedBlockInfoProto blocks = 2;
java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto>
getBlocksList();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto getBlocks(int index);
int getBlocksCount();
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder>
getBlocksOrBuilderList();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder getBlocksOrBuilder(
int index);
}
public static final class StorageReceivedDeletedBlocksProto extends
com.google.protobuf.GeneratedMessage
implements StorageReceivedDeletedBlocksProtoOrBuilder {
// Use StorageReceivedDeletedBlocksProto.newBuilder() to construct.
private StorageReceivedDeletedBlocksProto(Builder builder) {
super(builder);
}
private StorageReceivedDeletedBlocksProto(boolean noInit) {}
private static final StorageReceivedDeletedBlocksProto defaultInstance;
public static StorageReceivedDeletedBlocksProto getDefaultInstance() {
return defaultInstance;
}
public StorageReceivedDeletedBlocksProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_StorageReceivedDeletedBlocksProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_StorageReceivedDeletedBlocksProto_fieldAccessorTable;
}
private int bitField0_;
// required string storageID = 1;
public static final int STORAGEID_FIELD_NUMBER = 1;
private java.lang.Object storageID_;
public boolean hasStorageID() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public String getStorageID() {
java.lang.Object ref = storageID_;
if (ref instanceof String) {
return (String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
String s = bs.toStringUtf8();
if (com.google.protobuf.Internal.isValidUtf8(bs)) {
storageID_ = s;
}
return s;
}
}
private com.google.protobuf.ByteString getStorageIDBytes() {
java.lang.Object ref = storageID_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8((String) ref);
storageID_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// repeated .ReceivedDeletedBlockInfoProto blocks = 2;
public static final int BLOCKS_FIELD_NUMBER = 2;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto> blocks_;
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto> getBlocksList() {
return blocks_;
}
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder>
getBlocksOrBuilderList() {
return blocks_;
}
public int getBlocksCount() {
return blocks_.size();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto getBlocks(int index) {
return blocks_.get(index);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder getBlocksOrBuilder(
int index) {
return blocks_.get(index);
}
private void initFields() {
storageID_ = "";
blocks_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasStorageID()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getBlocksCount(); i++) {
if (!getBlocks(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getStorageIDBytes());
}
for (int i = 0; i < blocks_.size(); i++) {
output.writeMessage(2, blocks_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getStorageIDBytes());
}
for (int i = 0; i < blocks_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, blocks_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto) obj;
boolean result = true;
result = result && (hasStorageID() == other.hasStorageID());
if (hasStorageID()) {
result = result && getStorageID()
.equals(other.getStorageID());
}
result = result && getBlocksList()
.equals(other.getBlocksList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasStorageID()) {
hash = (37 * hash) + STORAGEID_FIELD_NUMBER;
hash = (53 * hash) + getStorageID().hashCode();
}
if (getBlocksCount() > 0) {
hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
hash = (53 * hash) + getBlocksList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_StorageReceivedDeletedBlocksProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_StorageReceivedDeletedBlocksProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getBlocksFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
storageID_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
if (blocksBuilder_ == null) {
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
} else {
blocksBuilder_.clear();
}
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.storageID_ = storageID_;
if (blocksBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002)) {
blocks_ = java.util.Collections.unmodifiableList(blocks_);
bitField0_ = (bitField0_ & ~0x00000002);
}
result.blocks_ = blocks_;
} else {
result.blocks_ = blocksBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.getDefaultInstance()) return this;
if (other.hasStorageID()) {
setStorageID(other.getStorageID());
}
if (blocksBuilder_ == null) {
if (!other.blocks_.isEmpty()) {
if (blocks_.isEmpty()) {
blocks_ = other.blocks_;
bitField0_ = (bitField0_ & ~0x00000002);
} else {
ensureBlocksIsMutable();
blocks_.addAll(other.blocks_);
}
onChanged();
}
} else {
if (!other.blocks_.isEmpty()) {
if (blocksBuilder_.isEmpty()) {
blocksBuilder_.dispose();
blocksBuilder_ = null;
blocks_ = other.blocks_;
bitField0_ = (bitField0_ & ~0x00000002);
blocksBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getBlocksFieldBuilder() : null;
} else {
blocksBuilder_.addAllMessages(other.blocks_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasStorageID()) {
return false;
}
for (int i = 0; i < getBlocksCount(); i++) {
if (!getBlocks(i).isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
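// Note (editorial): protobuf wire tags are (field_number << 3) | wire_type, so
// tag 10 is field 1 (storageID, length-delimited) and tag 18 is field 2
// (blocks, length-delimited).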
case 10: {
bitField0_ |= 0x00000001;
storageID_ = input.readBytes();
break;
}
case 18: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.newBuilder();
input.readMessage(subBuilder, extensionRegistry);
addBlocks(subBuilder.buildPartial());
break;
}
}
}
}
private int bitField0_;
// required string storageID = 1;
private java.lang.Object storageID_ = "";
public boolean hasStorageID() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public String getStorageID() {
java.lang.Object ref = storageID_;
if (!(ref instanceof String)) {
String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
storageID_ = s;
return s;
} else {
return (String) ref;
}
}
public Builder setStorageID(String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
storageID_ = value;
onChanged();
return this;
}
public Builder clearStorageID() {
bitField0_ = (bitField0_ & ~0x00000001);
storageID_ = getDefaultInstance().getStorageID();
onChanged();
return this;
}
void setStorageID(com.google.protobuf.ByteString value) {
bitField0_ |= 0x00000001;
storageID_ = value;
onChanged();
}
// repeated .ReceivedDeletedBlockInfoProto blocks = 2;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto> blocks_ =
java.util.Collections.emptyList();
private void ensureBlocksIsMutable() {
if (!((bitField0_ & 0x00000002) == 0x00000002)) {
blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto>(blocks_);
bitField0_ |= 0x00000002;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder> blocksBuilder_;
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto> getBlocksList() {
if (blocksBuilder_ == null) {
return java.util.Collections.unmodifiableList(blocks_);
} else {
return blocksBuilder_.getMessageList();
}
}
public int getBlocksCount() {
if (blocksBuilder_ == null) {
return blocks_.size();
} else {
return blocksBuilder_.getCount();
}
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto getBlocks(int index) {
if (blocksBuilder_ == null) {
return blocks_.get(index);
} else {
return blocksBuilder_.getMessage(index);
}
}
public Builder setBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.set(index, value);
onChanged();
} else {
blocksBuilder_.setMessage(index, value);
}
return this;
}
public Builder setBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.set(index, builderForValue.build());
onChanged();
} else {
blocksBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.add(value);
onChanged();
} else {
blocksBuilder_.addMessage(value);
}
return this;
}
public Builder addBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.add(index, value);
onChanged();
} else {
blocksBuilder_.addMessage(index, value);
}
return this;
}
public Builder addBlocks(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.add(builderForValue.build());
onChanged();
} else {
blocksBuilder_.addMessage(builderForValue.build());
}
return this;
}
public Builder addBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.add(index, builderForValue.build());
onChanged();
} else {
blocksBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
public Builder addAllBlocks(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto> values) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
super.addAll(values, blocks_);
onChanged();
} else {
blocksBuilder_.addAllMessages(values);
}
return this;
}
public Builder clearBlocks() {
if (blocksBuilder_ == null) {
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
} else {
blocksBuilder_.clear();
}
return this;
}
public Builder removeBlocks(int index) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.remove(index);
onChanged();
} else {
blocksBuilder_.remove(index);
}
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder getBlocksBuilder(
int index) {
return getBlocksFieldBuilder().getBuilder(index);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder getBlocksOrBuilder(
int index) {
if (blocksBuilder_ == null) {
return blocks_.get(index); } else {
return blocksBuilder_.getMessageOrBuilder(index);
}
}
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder>
getBlocksOrBuilderList() {
if (blocksBuilder_ != null) {
return blocksBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(blocks_);
}
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder addBlocksBuilder() {
return getBlocksFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.getDefaultInstance());
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder addBlocksBuilder(
int index) {
return getBlocksFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.getDefaultInstance());
}
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder>
getBlocksBuilderList() {
return getBlocksFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder>
getBlocksFieldBuilder() {
if (blocksBuilder_ == null) {
blocksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder>(
blocks_,
((bitField0_ & 0x00000002) == 0x00000002),
getParentForChildren(),
isClean());
blocks_ = null;
}
return blocksBuilder_;
}
// @@protoc_insertion_point(builder_scope:StorageReceivedDeletedBlocksProto)
}
static {
defaultInstance = new StorageReceivedDeletedBlocksProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:StorageReceivedDeletedBlocksProto)
}
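// Usage sketch (editorial addition, not part of the generated file): one way a
// caller might build and round-trip this message. The storage ID below is a
// hypothetical placeholder, and `info` is assumed to be a fully built
// ReceivedDeletedBlockInfoProto.
//
//   StorageReceivedDeletedBlocksProto reported =
//       StorageReceivedDeletedBlocksProto.newBuilder()
//           .setStorageID("DS-hypothetical-id")   // required field
//           .addBlocks(info)
//           .build();                             // throws if a required field is unset
//   byte[] wire = reported.toByteArray();
//   StorageReceivedDeletedBlocksProto parsed =
//       StorageReceivedDeletedBlocksProto.parseFrom(wire);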
public interface BlockReceivedAndDeletedRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .DatanodeRegistrationProto registration = 1;
boolean hasRegistration();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder();
// required string blockPoolId = 2;
boolean hasBlockPoolId();
String getBlockPoolId();
// repeated .StorageReceivedDeletedBlocksProto blocks = 3;
java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto>
getBlocksList();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto getBlocks(int index);
int getBlocksCount();
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder>
getBlocksOrBuilderList();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder getBlocksOrBuilder(
int index);
}
public static final class BlockReceivedAndDeletedRequestProto extends
com.google.protobuf.GeneratedMessage
implements BlockReceivedAndDeletedRequestProtoOrBuilder {
// Use BlockReceivedAndDeletedRequestProto.newBuilder() to construct.
private BlockReceivedAndDeletedRequestProto(Builder builder) {
super(builder);
}
private BlockReceivedAndDeletedRequestProto(boolean noInit) {}
private static final BlockReceivedAndDeletedRequestProto defaultInstance;
public static BlockReceivedAndDeletedRequestProto getDefaultInstance() {
return defaultInstance;
}
public BlockReceivedAndDeletedRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReceivedAndDeletedRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReceivedAndDeletedRequestProto_fieldAccessorTable;
}
private int bitField0_;
// required .DatanodeRegistrationProto registration = 1;
public static final int REGISTRATION_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_;
public boolean hasRegistration() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
return registration_;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
return registration_;
}
// required string blockPoolId = 2;
public static final int BLOCKPOOLID_FIELD_NUMBER = 2;
private java.lang.Object blockPoolId_;
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof String) {
return (String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
String s = bs.toStringUtf8();
if (com.google.protobuf.Internal.isValidUtf8(bs)) {
blockPoolId_ = s;
}
return s;
}
}
private com.google.protobuf.ByteString getBlockPoolIdBytes() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8((String) ref);
blockPoolId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// repeated .StorageReceivedDeletedBlocksProto blocks = 3;
public static final int BLOCKS_FIELD_NUMBER = 3;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto> blocks_;
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto> getBlocksList() {
return blocks_;
}
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder>
getBlocksOrBuilderList() {
return blocks_;
}
public int getBlocksCount() {
return blocks_.size();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto getBlocks(int index) {
return blocks_.get(index);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder getBlocksOrBuilder(
int index) {
return blocks_.get(index);
}
private void initFields() {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
blockPoolId_ = "";
blocks_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasRegistration()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasBlockPoolId()) {
memoizedIsInitialized = 0;
return false;
}
if (!getRegistration().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getBlocksCount(); i++) {
if (!getBlocks(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, registration_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getBlockPoolIdBytes());
}
for (int i = 0; i < blocks_.size(); i++) {
output.writeMessage(3, blocks_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, registration_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getBlockPoolIdBytes());
}
for (int i = 0; i < blocks_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, blocks_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto) obj;
boolean result = true;
result = result && (hasRegistration() == other.hasRegistration());
if (hasRegistration()) {
result = result && getRegistration()
.equals(other.getRegistration());
}
result = result && (hasBlockPoolId() == other.hasBlockPoolId());
if (hasBlockPoolId()) {
result = result && getBlockPoolId()
.equals(other.getBlockPoolId());
}
result = result && getBlocksList()
.equals(other.getBlocksList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasRegistration()) {
hash = (37 * hash) + REGISTRATION_FIELD_NUMBER;
hash = (53 * hash) + getRegistration().hashCode();
}
if (hasBlockPoolId()) {
hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
hash = (53 * hash) + getBlockPoolId().hashCode();
}
if (getBlocksCount() > 0) {
hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
hash = (53 * hash) + getBlocksList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReceivedAndDeletedRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReceivedAndDeletedRequestProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getRegistrationFieldBuilder();
getBlocksFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (registrationBuilder_ == null) {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
} else {
registrationBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
blockPoolId_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
if (blocksBuilder_ == null) {
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
} else {
blocksBuilder_.clear();
}
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (registrationBuilder_ == null) {
result.registration_ = registration_;
} else {
result.registration_ = registrationBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.blockPoolId_ = blockPoolId_;
if (blocksBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004)) {
blocks_ = java.util.Collections.unmodifiableList(blocks_);
bitField0_ = (bitField0_ & ~0x00000004);
}
result.blocks_ = blocks_;
} else {
result.blocks_ = blocksBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.getDefaultInstance()) return this;
if (other.hasRegistration()) {
mergeRegistration(other.getRegistration());
}
if (other.hasBlockPoolId()) {
setBlockPoolId(other.getBlockPoolId());
}
if (blocksBuilder_ == null) {
if (!other.blocks_.isEmpty()) {
if (blocks_.isEmpty()) {
blocks_ = other.blocks_;
bitField0_ = (bitField0_ & ~0x00000004);
} else {
ensureBlocksIsMutable();
blocks_.addAll(other.blocks_);
}
onChanged();
}
} else {
if (!other.blocks_.isEmpty()) {
if (blocksBuilder_.isEmpty()) {
blocksBuilder_.dispose();
blocksBuilder_ = null;
blocks_ = other.blocks_;
bitField0_ = (bitField0_ & ~0x00000004);
blocksBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getBlocksFieldBuilder() : null;
} else {
blocksBuilder_.addAllMessages(other.blocks_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasRegistration()) {
return false;
}
if (!hasBlockPoolId()) {
return false;
}
if (!getRegistration().isInitialized()) {
return false;
}
for (int i = 0; i < getBlocksCount(); i++) {
if (!getBlocks(i).isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
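// Note (editorial): tag 10 is field 1 (registration), tag 18 is field 2
// (blockPoolId), and tag 26 is field 3 (blocks); all three are
// length-delimited (wire type 2).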
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder();
if (hasRegistration()) {
subBuilder.mergeFrom(getRegistration());
}
input.readMessage(subBuilder, extensionRegistry);
setRegistration(subBuilder.buildPartial());
break;
}
case 18: {
bitField0_ |= 0x00000002;
blockPoolId_ = input.readBytes();
break;
}
case 26: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.newBuilder();
input.readMessage(subBuilder, extensionRegistry);
addBlocks(subBuilder.buildPartial());
break;
}
}
}
}
private int bitField0_;
// required .DatanodeRegistrationProto registration = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registrationBuilder_;
public boolean hasRegistration() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
if (registrationBuilder_ == null) {
return registration_;
} else {
return registrationBuilder_.getMessage();
}
}
public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
if (registrationBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
registration_ = value;
onChanged();
} else {
registrationBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder setRegistration(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) {
if (registrationBuilder_ == null) {
registration_ = builderForValue.build();
onChanged();
} else {
registrationBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
if (registrationBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
registration_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) {
registration_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial();
} else {
registration_ = value;
}
onChanged();
} else {
registrationBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder clearRegistration() {
if (registrationBuilder_ == null) {
registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
onChanged();
} else {
registrationBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistrationBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getRegistrationFieldBuilder().getBuilder();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
if (registrationBuilder_ != null) {
return registrationBuilder_.getMessageOrBuilder();
} else {
return registration_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>
getRegistrationFieldBuilder() {
if (registrationBuilder_ == null) {
registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>(
registration_,
getParentForChildren(),
isClean());
registration_ = null;
}
return registrationBuilder_;
}
// required string blockPoolId = 2;
private java.lang.Object blockPoolId_ = "";
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (!(ref instanceof String)) {
String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
blockPoolId_ = s;
return s;
} else {
return (String) ref;
}
}
public Builder setBlockPoolId(String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
blockPoolId_ = value;
onChanged();
return this;
}
public Builder clearBlockPoolId() {
bitField0_ = (bitField0_ & ~0x00000002);
blockPoolId_ = getDefaultInstance().getBlockPoolId();
onChanged();
return this;
}
void setBlockPoolId(com.google.protobuf.ByteString value) {
bitField0_ |= 0x00000002;
blockPoolId_ = value;
onChanged();
}
// repeated .StorageReceivedDeletedBlocksProto blocks = 3;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto> blocks_ =
java.util.Collections.emptyList();
private void ensureBlocksIsMutable() {
if (!((bitField0_ & 0x00000004) == 0x00000004)) {
blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto>(blocks_);
bitField0_ |= 0x00000004;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder> blocksBuilder_;
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto> getBlocksList() {
if (blocksBuilder_ == null) {
return java.util.Collections.unmodifiableList(blocks_);
} else {
return blocksBuilder_.getMessageList();
}
}
public int getBlocksCount() {
if (blocksBuilder_ == null) {
return blocks_.size();
} else {
return blocksBuilder_.getCount();
}
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto getBlocks(int index) {
if (blocksBuilder_ == null) {
return blocks_.get(index);
} else {
return blocksBuilder_.getMessage(index);
}
}
public Builder setBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.set(index, value);
onChanged();
} else {
blocksBuilder_.setMessage(index, value);
}
return this;
}
public Builder setBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.set(index, builderForValue.build());
onChanged();
} else {
blocksBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.add(value);
onChanged();
} else {
blocksBuilder_.addMessage(value);
}
return this;
}
public Builder addBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.add(index, value);
onChanged();
} else {
blocksBuilder_.addMessage(index, value);
}
return this;
}
public Builder addBlocks(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.add(builderForValue.build());
onChanged();
} else {
blocksBuilder_.addMessage(builderForValue.build());
}
return this;
}
public Builder addBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.add(index, builderForValue.build());
onChanged();
} else {
blocksBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
public Builder addAllBlocks(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto> values) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
super.addAll(values, blocks_);
onChanged();
} else {
blocksBuilder_.addAllMessages(values);
}
return this;
}
public Builder clearBlocks() {
if (blocksBuilder_ == null) {
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
} else {
blocksBuilder_.clear();
}
return this;
}
public Builder removeBlocks(int index) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.remove(index);
onChanged();
} else {
blocksBuilder_.remove(index);
}
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder getBlocksBuilder(
int index) {
return getBlocksFieldBuilder().getBuilder(index);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder getBlocksOrBuilder(
int index) {
if (blocksBuilder_ == null) {
return blocks_.get(index); } else {
return blocksBuilder_.getMessageOrBuilder(index);
}
}
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder>
getBlocksOrBuilderList() {
if (blocksBuilder_ != null) {
return blocksBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(blocks_);
}
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder addBlocksBuilder() {
return getBlocksFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.getDefaultInstance());
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder addBlocksBuilder(
int index) {
return getBlocksFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.getDefaultInstance());
}
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder>
getBlocksBuilderList() {
return getBlocksFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder>
getBlocksFieldBuilder() {
if (blocksBuilder_ == null) {
blocksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder>(
blocks_,
((bitField0_ & 0x00000004) == 0x00000004),
getParentForChildren(),
isClean());
blocks_ = null;
}
return blocksBuilder_;
}
// @@protoc_insertion_point(builder_scope:BlockReceivedAndDeletedRequestProto)
}
static {
defaultInstance = new BlockReceivedAndDeletedRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:BlockReceivedAndDeletedRequestProto)
}
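// Usage sketch (editorial addition, not part of the generated file): assembling
// the blockReceivedAndDeleted request from sub-messages. The `registration` and
// `storageBlocks` values are assumed to have been built elsewhere; the block
// pool ID is a hypothetical placeholder.
//
//   BlockReceivedAndDeletedRequestProto request =
//       BlockReceivedAndDeletedRequestProto.newBuilder()
//           .setRegistration(registration)    // DatanodeRegistrationProto
//           .setBlockPoolId("BP-hypothetical-id")
//           .addBlocks(storageBlocks)         // StorageReceivedDeletedBlocksProto
//           .build();                         // throws if a required field is unset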
public interface BlockReceivedAndDeletedResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
}
public static final class BlockReceivedAndDeletedResponseProto extends
com.google.protobuf.GeneratedMessage
implements BlockReceivedAndDeletedResponseProtoOrBuilder {
// Use BlockReceivedAndDeletedResponseProto.newBuilder() to construct.
private BlockReceivedAndDeletedResponseProto(Builder builder) {
super(builder);
}
private BlockReceivedAndDeletedResponseProto(boolean noInit) {}
private static final BlockReceivedAndDeletedResponseProto defaultInstance;
public static BlockReceivedAndDeletedResponseProto getDefaultInstance() {
return defaultInstance;
}
public BlockReceivedAndDeletedResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReceivedAndDeletedResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReceivedAndDeletedResponseProto_fieldAccessorTable;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReceivedAndDeletedResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReceivedAndDeletedResponseProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
}
}
}
// @@protoc_insertion_point(builder_scope:BlockReceivedAndDeletedResponseProto)
}
static {
defaultInstance = new BlockReceivedAndDeletedResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:BlockReceivedAndDeletedResponseProto)
}
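// ---------------------------------------------------------------------------
// Usage sketch (editor's illustration, not protoc output): the response
// message above declares no fields, so a freshly built instance serializes
// to an empty payload and parses back equal to the default instance. The
// helper class is hypothetical and exists only to demonstrate the generated
// parseFrom/toByteArray API.
private static final class BlockReceivedAndDeletedResponseProtoExample {
  static void roundTrip() throws com.google.protobuf.InvalidProtocolBufferException {
    BlockReceivedAndDeletedResponseProto response =
        BlockReceivedAndDeletedResponseProto.newBuilder().build();
    byte[] wire = response.toByteArray();   // field-free message: zero bytes on the wire
    BlockReceivedAndDeletedResponseProto parsed =
        BlockReceivedAndDeletedResponseProto.parseFrom(wire);
    assert parsed.equals(BlockReceivedAndDeletedResponseProto.getDefaultInstance());
  }
}
// ---------------------------------------------------------------------------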
public interface ErrorReportRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .DatanodeRegistrationProto registartion = 1;
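// NOTE: "registartion" here and in the accessors below is a misspelling of
// "registration" carried over from the field name declared in
// DatanodeProtocol.proto; the generated identifiers must match that name.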
boolean hasRegistartion();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistartion();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistartionOrBuilder();
// required uint32 errorCode = 2;
boolean hasErrorCode();
int getErrorCode();
// required string msg = 3;
boolean hasMsg();
String getMsg();
}
public static final class ErrorReportRequestProto extends
com.google.protobuf.GeneratedMessage
implements ErrorReportRequestProtoOrBuilder {
// Use ErrorReportRequestProto.newBuilder() to construct.
private ErrorReportRequestProto(Builder builder) {
super(builder);
}
private ErrorReportRequestProto(boolean noInit) {}
private static final ErrorReportRequestProto defaultInstance;
public static ErrorReportRequestProto getDefaultInstance() {
return defaultInstance;
}
public ErrorReportRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ErrorReportRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ErrorReportRequestProto_fieldAccessorTable;
}
public enum ErrorCode
implements com.google.protobuf.ProtocolMessageEnum {
NOTIFY(0, 0),
DISK_ERROR(1, 1),
INVALID_BLOCK(2, 2),
FATAL_DISK_ERROR(3, 3),
;
public static final int NOTIFY_VALUE = 0;
public static final int DISK_ERROR_VALUE = 1;
public static final int INVALID_BLOCK_VALUE = 2;
public static final int FATAL_DISK_ERROR_VALUE = 3;
public final int getNumber() { return value; }
public static ErrorCode valueOf(int value) {
switch (value) {
case 0: return NOTIFY;
case 1: return DISK_ERROR;
case 2: return INVALID_BLOCK;
case 3: return FATAL_DISK_ERROR;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<ErrorCode>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<ErrorCode>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<ErrorCode>() {
public ErrorCode findValueByNumber(int number) {
return ErrorCode.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.getDescriptor().getEnumTypes().get(0);
}
private static final ErrorCode[] VALUES = {
NOTIFY, DISK_ERROR, INVALID_BLOCK, FATAL_DISK_ERROR,
};
public static ErrorCode valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private ErrorCode(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:ErrorReportRequestProto.ErrorCode)
}
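// Usage sketch (editor's illustration, not protoc output): the errorCode
// field below is declared as a plain uint32 rather than as this enum, so
// callers convert explicitly with ErrorCode.getNumber() when writing and
// ErrorCode.valueOf(int) when reading; valueOf returns null for numbers
// outside the declared range. The fallback below is a hypothetical choice.
private static ErrorCode decodeErrorCodeForExample(int wireValue) {
  ErrorCode code = ErrorCode.valueOf(wireValue);   // e.g. 1 => DISK_ERROR
  return (code != null) ? code : ErrorCode.NOTIFY;
}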
private int bitField0_;
// required .DatanodeRegistrationProto registartion = 1;
public static final int REGISTARTION_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registartion_;
public boolean hasRegistartion() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistartion() {
return registartion_;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistartionOrBuilder() {
return registartion_;
}
// required uint32 errorCode = 2;
public static final int ERRORCODE_FIELD_NUMBER = 2;
private int errorCode_;
public boolean hasErrorCode() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public int getErrorCode() {
return errorCode_;
}
// required string msg = 3;
public static final int MSG_FIELD_NUMBER = 3;
private java.lang.Object msg_;
public boolean hasMsg() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
public String getMsg() {
java.lang.Object ref = msg_;
if (ref instanceof String) {
return (String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
String s = bs.toStringUtf8();
if (com.google.protobuf.Internal.isValidUtf8(bs)) {
msg_ = s;
}
return s;
}
}
private com.google.protobuf.ByteString getMsgBytes() {
java.lang.Object ref = msg_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8((String) ref);
msg_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
registartion_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
errorCode_ = 0;
msg_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasRegistartion()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasErrorCode()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasMsg()) {
memoizedIsInitialized = 0;
return false;
}
if (!getRegistartion().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, registartion_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt32(2, errorCode_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, getMsgBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, registartion_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(2, errorCode_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, getMsgBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto) obj;
boolean result = true;
result = result && (hasRegistartion() == other.hasRegistartion());
if (hasRegistartion()) {
result = result && getRegistartion()
.equals(other.getRegistartion());
}
result = result && (hasErrorCode() == other.hasErrorCode());
if (hasErrorCode()) {
result = result && (getErrorCode()
== other.getErrorCode());
}
result = result && (hasMsg() == other.hasMsg());
if (hasMsg()) {
result = result && getMsg()
.equals(other.getMsg());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasRegistartion()) {
hash = (37 * hash) + REGISTARTION_FIELD_NUMBER;
hash = (53 * hash) + getRegistartion().hashCode();
}
if (hasErrorCode()) {
hash = (37 * hash) + ERRORCODE_FIELD_NUMBER;
hash = (53 * hash) + getErrorCode();
}
if (hasMsg()) {
hash = (37 * hash) + MSG_FIELD_NUMBER;
hash = (53 * hash) + getMsg().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ErrorReportRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ErrorReportRequestProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getRegistartionFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (registartionBuilder_ == null) {
registartion_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
} else {
registartionBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
errorCode_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
msg_ = "";
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (registartionBuilder_ == null) {
result.registartion_ = registartion_;
} else {
result.registartion_ = registartionBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.errorCode_ = errorCode_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.msg_ = msg_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance()) return this;
if (other.hasRegistartion()) {
mergeRegistartion(other.getRegistartion());
}
if (other.hasErrorCode()) {
setErrorCode(other.getErrorCode());
}
if (other.hasMsg()) {
setMsg(other.getMsg());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasRegistartion()) {
return false;
}
if (!hasErrorCode()) {
return false;
}
if (!hasMsg()) {
return false;
}
if (!getRegistartion().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder();
if (hasRegistartion()) {
subBuilder.mergeFrom(getRegistartion());
}
input.readMessage(subBuilder, extensionRegistry);
setRegistartion(subBuilder.buildPartial());
break;
}
case 16: {
bitField0_ |= 0x00000002;
errorCode_ = input.readUInt32();
break;
}
case 26: {
bitField0_ |= 0x00000004;
msg_ = input.readBytes();
break;
}
}
}
}
private int bitField0_;
// required .DatanodeRegistrationProto registartion = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registartion_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registartionBuilder_;
public boolean hasRegistartion() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistartion() {
if (registartionBuilder_ == null) {
return registartion_;
} else {
return registartionBuilder_.getMessage();
}
}
public Builder setRegistartion(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
if (registartionBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
registartion_ = value;
onChanged();
} else {
registartionBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder setRegistartion(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) {
if (registartionBuilder_ == null) {
registartion_ = builderForValue.build();
onChanged();
} else {
registartionBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
public Builder mergeRegistartion(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
if (registartionBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
registartion_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) {
registartion_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(registartion_).mergeFrom(value).buildPartial();
} else {
registartion_ = value;
}
onChanged();
} else {
registartionBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder clearRegistartion() {
if (registartionBuilder_ == null) {
registartion_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
onChanged();
} else {
registartionBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistartionBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getRegistartionFieldBuilder().getBuilder();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistartionOrBuilder() {
if (registartionBuilder_ != null) {
return registartionBuilder_.getMessageOrBuilder();
} else {
return registartion_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>
getRegistartionFieldBuilder() {
if (registartionBuilder_ == null) {
registartionBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>(
registartion_,
getParentForChildren(),
isClean());
registartion_ = null;
}
return registartionBuilder_;
}
// required uint32 errorCode = 2;
private int errorCode_ ;
public boolean hasErrorCode() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public int getErrorCode() {
return errorCode_;
}
public Builder setErrorCode(int value) {
bitField0_ |= 0x00000002;
errorCode_ = value;
onChanged();
return this;
}
public Builder clearErrorCode() {
bitField0_ = (bitField0_ & ~0x00000002);
errorCode_ = 0;
onChanged();
return this;
}
// required string msg = 3;
private java.lang.Object msg_ = "";
public boolean hasMsg() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
public String getMsg() {
java.lang.Object ref = msg_;
if (!(ref instanceof String)) {
String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
msg_ = s;
return s;
} else {
return (String) ref;
}
}
public Builder setMsg(String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
msg_ = value;
onChanged();
return this;
}
public Builder clearMsg() {
bitField0_ = (bitField0_ & ~0x00000004);
msg_ = getDefaultInstance().getMsg();
onChanged();
return this;
}
void setMsg(com.google.protobuf.ByteString value) {
bitField0_ |= 0x00000004;
msg_ = value;
onChanged();
}
// @@protoc_insertion_point(builder_scope:ErrorReportRequestProto)
}
static {
defaultInstance = new ErrorReportRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:ErrorReportRequestProto)
}
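// ---------------------------------------------------------------------------
// Usage sketch (editor's illustration, not protoc output): every field of
// ErrorReportRequestProto is required, so build() throws on an incomplete
// message while buildPartial() hands back an uninitialized one. A fully
// populated registartion submessage is omitted here because
// DatanodeRegistrationProto carries further required fields of its own.
private static final class ErrorReportRequestProtoExample {
  static void requiredFieldCheck() {
    ErrorReportRequestProto.Builder builder = ErrorReportRequestProto.newBuilder()
        .setErrorCode(ErrorReportRequestProto.ErrorCode.DISK_ERROR.getNumber())
        .setMsg("disk failure on volume /data1");   // hypothetical message text
    assert !builder.isInitialized();                // registartion is still unset
    assert !builder.buildPartial().isInitialized();
    try {
      builder.build();   // rejects the message: required registartion is missing
    } catch (com.google.protobuf.UninitializedMessageException expected) {
      // expected path for an incomplete required field set
    }
  }
}
// ---------------------------------------------------------------------------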
public interface ErrorReportResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
}
public static final class ErrorReportResponseProto extends
com.google.protobuf.GeneratedMessage
implements ErrorReportResponseProtoOrBuilder {
// Use ErrorReportResponseProto.newBuilder() to construct.
private ErrorReportResponseProto(Builder builder) {
super(builder);
}
private ErrorReportResponseProto(boolean noInit) {}
private static final ErrorReportResponseProto defaultInstance;
public static ErrorReportResponseProto getDefaultInstance() {
return defaultInstance;
}
public ErrorReportResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ErrorReportResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ErrorReportResponseProto_fieldAccessorTable;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ErrorReportResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ErrorReportResponseProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
}
}
}
// @@protoc_insertion_point(builder_scope:ErrorReportResponseProto)
}
static {
defaultInstance = new ErrorReportResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:ErrorReportResponseProto)
}
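// ---------------------------------------------------------------------------
// Usage sketch (editor's illustration, not protoc output): the delimited
// helpers frame each message with a length prefix, and parseDelimitedFrom
// returns null once the stream is exhausted, so back-to-back messages can be
// read in a simple loop. The helper class is hypothetical.
private static final class ErrorReportResponseProtoExample {
  static int countDelimitedMessages(java.io.InputStream in) throws java.io.IOException {
    int count = 0;
    while (ErrorReportResponseProto.parseDelimitedFrom(in) != null) {
      count++;   // one length-prefixed ErrorReportResponseProto consumed
    }
    return count;
  }
}
// ---------------------------------------------------------------------------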
public interface ProcessUpgradeRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// optional .UpgradeCommandProto cmd = 1;
boolean hasCmd();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getCmd();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder getCmdOrBuilder();
}
public static final class ProcessUpgradeRequestProto extends
com.google.protobuf.GeneratedMessage
implements ProcessUpgradeRequestProtoOrBuilder {
// Use ProcessUpgradeRequestProto.newBuilder() to construct.
private ProcessUpgradeRequestProto(Builder builder) {
super(builder);
}
private ProcessUpgradeRequestProto(boolean noInit) {}
private static final ProcessUpgradeRequestProto defaultInstance;
public static ProcessUpgradeRequestProto getDefaultInstance() {
return defaultInstance;
}
public ProcessUpgradeRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ProcessUpgradeRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ProcessUpgradeRequestProto_fieldAccessorTable;
}
private int bitField0_;
// optional .UpgradeCommandProto cmd = 1;
public static final int CMD_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto cmd_;
public boolean hasCmd() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getCmd() {
return cmd_;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder getCmdOrBuilder() {
return cmd_;
}
private void initFields() {
cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (hasCmd()) {
if (!getCmd().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, cmd_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, cmd_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto) obj;
boolean result = true;
result = result && (hasCmd() == other.hasCmd());
if (hasCmd()) {
result = result && getCmd()
.equals(other.getCmd());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasCmd()) {
hash = (37 * hash) + CMD_FIELD_NUMBER;
hash = (53 * hash) + getCmd().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ProcessUpgradeRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ProcessUpgradeRequestProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getCmdFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (cmdBuilder_ == null) {
cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance();
} else {
cmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (cmdBuilder_ == null) {
result.cmd_ = cmd_;
} else {
result.cmd_ = cmdBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto.getDefaultInstance()) return this;
if (other.hasCmd()) {
mergeCmd(other.getCmd());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (hasCmd()) {
if (!getCmd().isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.newBuilder();
if (hasCmd()) {
subBuilder.mergeFrom(getCmd());
}
input.readMessage(subBuilder, extensionRegistry);
setCmd(subBuilder.buildPartial());
break;
}
}
}
}
private int bitField0_;
// optional .UpgradeCommandProto cmd = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder> cmdBuilder_;
public boolean hasCmd() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getCmd() {
if (cmdBuilder_ == null) {
return cmd_;
} else {
return cmdBuilder_.getMessage();
}
}
public Builder setCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto value) {
if (cmdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
cmd_ = value;
onChanged();
} else {
cmdBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder setCmd(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder builderForValue) {
if (cmdBuilder_ == null) {
cmd_ = builderForValue.build();
onChanged();
} else {
cmdBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
public Builder mergeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto value) {
if (cmdBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
cmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance()) {
cmd_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.newBuilder(cmd_).mergeFrom(value).buildPartial();
} else {
cmd_ = value;
}
onChanged();
} else {
cmdBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder clearCmd() {
if (cmdBuilder_ == null) {
cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance();
onChanged();
} else {
cmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder getCmdBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getCmdFieldBuilder().getBuilder();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder getCmdOrBuilder() {
if (cmdBuilder_ != null) {
return cmdBuilder_.getMessageOrBuilder();
} else {
return cmd_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder>
getCmdFieldBuilder() {
if (cmdBuilder_ == null) {
cmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder>(
cmd_,
getParentForChildren(),
isClean());
cmd_ = null;
}
return cmdBuilder_;
}
// @@protoc_insertion_point(builder_scope:ProcessUpgradeRequestProto)
}
static {
defaultInstance = new ProcessUpgradeRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:ProcessUpgradeRequestProto)
}
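// ---------------------------------------------------------------------------
// Usage sketch (editor's illustration, not protoc output): cmd is the only
// field and it is optional, so a request built without it is still
// initialized; hasCmd() distinguishes an absent field from a present one,
// and getCmd() on an absent field returns the UpgradeCommandProto default
// instance rather than null. The helper class is hypothetical.
private static final class ProcessUpgradeRequestProtoExample {
  static void optionalFieldSemantics() {
    ProcessUpgradeRequestProto request =
        ProcessUpgradeRequestProto.newBuilder().build();
    assert request.isInitialized();   // no required fields to satisfy
    assert !request.hasCmd();         // optional field left unset
    assert request.getCmd().equals(UpgradeCommandProto.getDefaultInstance());
  }
}
// ---------------------------------------------------------------------------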
public interface ProcessUpgradeResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// optional .UpgradeCommandProto cmd = 1;
boolean hasCmd();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getCmd();
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder getCmdOrBuilder();
}
public static final class ProcessUpgradeResponseProto extends
com.google.protobuf.GeneratedMessage
implements ProcessUpgradeResponseProtoOrBuilder {
// Use ProcessUpgradeResponseProto.newBuilder() to construct.
private ProcessUpgradeResponseProto(Builder builder) {
super(builder);
}
private ProcessUpgradeResponseProto(boolean noInit) {}
private static final ProcessUpgradeResponseProto defaultInstance;
public static ProcessUpgradeResponseProto getDefaultInstance() {
return defaultInstance;
}
public ProcessUpgradeResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ProcessUpgradeResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ProcessUpgradeResponseProto_fieldAccessorTable;
}
private int bitField0_;
// optional .UpgradeCommandProto cmd = 1;
public static final int CMD_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto cmd_;
public boolean hasCmd() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getCmd() {
return cmd_;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder getCmdOrBuilder() {
return cmd_;
}
private void initFields() {
cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (hasCmd()) {
if (!getCmd().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, cmd_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, cmd_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto) obj;
boolean result = true;
result = result && (hasCmd() == other.hasCmd());
if (hasCmd()) {
result = result && getCmd()
.equals(other.getCmd());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasCmd()) {
hash = (37 * hash) + CMD_FIELD_NUMBER;
hash = (53 * hash) + getCmd().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ProcessUpgradeResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ProcessUpgradeResponseProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getCmdFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (cmdBuilder_ == null) {
cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance();
} else {
cmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (cmdBuilder_ == null) {
result.cmd_ = cmd_;
} else {
result.cmd_ = cmdBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.getDefaultInstance()) return this;
if (other.hasCmd()) {
mergeCmd(other.getCmd());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (hasCmd()) {
if (!getCmd().isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.newBuilder();
if (hasCmd()) {
subBuilder.mergeFrom(getCmd());
}
input.readMessage(subBuilder, extensionRegistry);
setCmd(subBuilder.buildPartial());
break;
}
}
}
}
private int bitField0_;
// optional .UpgradeCommandProto cmd = 1;
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder> cmdBuilder_;
public boolean hasCmd() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getCmd() {
if (cmdBuilder_ == null) {
return cmd_;
} else {
return cmdBuilder_.getMessage();
}
}
public Builder setCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto value) {
if (cmdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
cmd_ = value;
onChanged();
} else {
cmdBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder setCmd(
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder builderForValue) {
if (cmdBuilder_ == null) {
cmd_ = builderForValue.build();
onChanged();
} else {
cmdBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
public Builder mergeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto value) {
if (cmdBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
cmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance()) {
cmd_ =
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.newBuilder(cmd_).mergeFrom(value).buildPartial();
} else {
cmd_ = value;
}
onChanged();
} else {
cmdBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder clearCmd() {
if (cmdBuilder_ == null) {
cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance();
onChanged();
} else {
cmdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder getCmdBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getCmdFieldBuilder().getBuilder();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder getCmdOrBuilder() {
if (cmdBuilder_ != null) {
return cmdBuilder_.getMessageOrBuilder();
} else {
return cmd_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder>
getCmdFieldBuilder() {
if (cmdBuilder_ == null) {
cmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder>(
cmd_,
getParentForChildren(),
isClean());
cmd_ = null;
}
return cmdBuilder_;
}
// @@protoc_insertion_point(builder_scope:ProcessUpgradeResponseProto)
}
static {
defaultInstance = new ProcessUpgradeResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:ProcessUpgradeResponseProto)
}
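// Illustrative usage sketch (not part of the generated code): building and
// round-tripping a ProcessUpgradeResponseProto through the byte[] parseFrom
// overload shown above. `upgradeCmd` is an assumed UpgradeCommandProto
// instance built elsewhere.
//
//   ProcessUpgradeResponseProto resp = ProcessUpgradeResponseProto.newBuilder()
//       .setCmd(upgradeCmd)          // optional field; omit for an empty response
//       .build();
//   byte[] bytes = resp.toByteArray();
//   ProcessUpgradeResponseProto parsed = ProcessUpgradeResponseProto.parseFrom(bytes);
//   boolean present = parsed.hasCmd();   // true once cmd was set before serializing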
public interface ReportBadBlocksRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// repeated .LocatedBlockProto blocks = 1;
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto>
getBlocksList();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index);
int getBlocksCount();
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>
getBlocksOrBuilderList();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder(
int index);
}
public static final class ReportBadBlocksRequestProto extends
com.google.protobuf.GeneratedMessage
implements ReportBadBlocksRequestProtoOrBuilder {
// Use ReportBadBlocksRequestProto.newBuilder() to construct.
private ReportBadBlocksRequestProto(Builder builder) {
super(builder);
}
private ReportBadBlocksRequestProto(boolean noInit) {}
private static final ReportBadBlocksRequestProto defaultInstance;
public static ReportBadBlocksRequestProto getDefaultInstance() {
return defaultInstance;
}
public ReportBadBlocksRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReportBadBlocksRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReportBadBlocksRequestProto_fieldAccessorTable;
}
// repeated .LocatedBlockProto blocks = 1;
public static final int BLOCKS_FIELD_NUMBER = 1;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> blocks_;
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> getBlocksList() {
return blocks_;
}
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>
getBlocksOrBuilderList() {
return blocks_;
}
public int getBlocksCount() {
return blocks_.size();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index) {
return blocks_.get(index);
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder(
int index) {
return blocks_.get(index);
}
private void initFields() {
blocks_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
for (int i = 0; i < getBlocksCount(); i++) {
if (!getBlocks(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < blocks_.size(); i++) {
output.writeMessage(1, blocks_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
for (int i = 0; i < blocks_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, blocks_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto) obj;
boolean result = true;
result = result && getBlocksList()
.equals(other.getBlocksList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (getBlocksCount() > 0) {
hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
hash = (53 * hash) + getBlocksList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReportBadBlocksRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReportBadBlocksRequestProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getBlocksFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (blocksBuilder_ == null) {
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
} else {
blocksBuilder_.clear();
}
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto(this);
int from_bitField0_ = bitField0_;
if (blocksBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001)) {
blocks_ = java.util.Collections.unmodifiableList(blocks_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.blocks_ = blocks_;
} else {
result.blocks_ = blocksBuilder_.build();
}
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.getDefaultInstance()) return this;
if (blocksBuilder_ == null) {
if (!other.blocks_.isEmpty()) {
if (blocks_.isEmpty()) {
blocks_ = other.blocks_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureBlocksIsMutable();
blocks_.addAll(other.blocks_);
}
onChanged();
}
} else {
if (!other.blocks_.isEmpty()) {
if (blocksBuilder_.isEmpty()) {
blocksBuilder_.dispose();
blocksBuilder_ = null;
blocks_ = other.blocks_;
bitField0_ = (bitField0_ & ~0x00000001);
blocksBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getBlocksFieldBuilder() : null;
} else {
blocksBuilder_.addAllMessages(other.blocks_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
for (int i = 0; i < getBlocksCount(); i++) {
if (!getBlocks(i).isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder();
input.readMessage(subBuilder, extensionRegistry);
addBlocks(subBuilder.buildPartial());
break;
}
}
}
}
private int bitField0_;
// repeated .LocatedBlockProto blocks = 1;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> blocks_ =
java.util.Collections.emptyList();
private void ensureBlocksIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto>(blocks_);
bitField0_ |= 0x00000001;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> blocksBuilder_;
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> getBlocksList() {
if (blocksBuilder_ == null) {
return java.util.Collections.unmodifiableList(blocks_);
} else {
return blocksBuilder_.getMessageList();
}
}
public int getBlocksCount() {
if (blocksBuilder_ == null) {
return blocks_.size();
} else {
return blocksBuilder_.getCount();
}
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index) {
if (blocksBuilder_ == null) {
return blocks_.get(index);
} else {
return blocksBuilder_.getMessage(index);
}
}
public Builder setBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.set(index, value);
onChanged();
} else {
blocksBuilder_.setMessage(index, value);
}
return this;
}
public Builder setBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.set(index, builderForValue.build());
onChanged();
} else {
blocksBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.add(value);
onChanged();
} else {
blocksBuilder_.addMessage(value);
}
return this;
}
public Builder addBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.add(index, value);
onChanged();
} else {
blocksBuilder_.addMessage(index, value);
}
return this;
}
public Builder addBlocks(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.add(builderForValue.build());
onChanged();
} else {
blocksBuilder_.addMessage(builderForValue.build());
}
return this;
}
public Builder addBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.add(index, builderForValue.build());
onChanged();
} else {
blocksBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
public Builder addAllBlocks(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> values) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
super.addAll(values, blocks_);
onChanged();
} else {
blocksBuilder_.addAllMessages(values);
}
return this;
}
public Builder clearBlocks() {
if (blocksBuilder_ == null) {
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
} else {
blocksBuilder_.clear();
}
return this;
}
public Builder removeBlocks(int index) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.remove(index);
onChanged();
} else {
blocksBuilder_.remove(index);
}
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getBlocksBuilder(
int index) {
return getBlocksFieldBuilder().getBuilder(index);
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder(
int index) {
if (blocksBuilder_ == null) {
return blocks_.get(index); } else {
return blocksBuilder_.getMessageOrBuilder(index);
}
}
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>
getBlocksOrBuilderList() {
if (blocksBuilder_ != null) {
return blocksBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(blocks_);
}
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder addBlocksBuilder() {
return getBlocksFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance());
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder addBlocksBuilder(
int index) {
return getBlocksFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance());
}
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder>
getBlocksBuilderList() {
return getBlocksFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>
getBlocksFieldBuilder() {
if (blocksBuilder_ == null) {
blocksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>(
blocks_,
((bitField0_ & 0x00000001) == 0x00000001),
getParentForChildren(),
isClean());
blocks_ = null;
}
return blocksBuilder_;
}
// @@protoc_insertion_point(builder_scope:ReportBadBlocksRequestProto)
}
static {
defaultInstance = new ReportBadBlocksRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:ReportBadBlocksRequestProto)
}
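// Illustrative usage sketch (not part of the generated code): populating the
// repeated blocks field of ReportBadBlocksRequestProto via the addBlocks /
// getBlocksCount / getBlocks accessors defined above. `blk1` and `blk2` are
// assumed LocatedBlockProto instances obtained elsewhere.
//
//   ReportBadBlocksRequestProto req = ReportBadBlocksRequestProto.newBuilder()
//       .addBlocks(blk1)
//       .addBlocks(blk2)
//       .build();
//   int count = req.getBlocksCount();                                  // 2
//   HdfsProtos.LocatedBlockProto first = req.getBlocks(0);             // blk1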
public interface ReportBadBlocksResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
}
public static final class ReportBadBlocksResponseProto extends
com.google.protobuf.GeneratedMessage
implements ReportBadBlocksResponseProtoOrBuilder {
// Use ReportBadBlocksResponseProto.newBuilder() to construct.
private ReportBadBlocksResponseProto(Builder builder) {
super(builder);
}
private ReportBadBlocksResponseProto(boolean noInit) {}
private static final ReportBadBlocksResponseProto defaultInstance;
public static ReportBadBlocksResponseProto getDefaultInstance() {
return defaultInstance;
}
public ReportBadBlocksResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReportBadBlocksResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReportBadBlocksResponseProto_fieldAccessorTable;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReportBadBlocksResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReportBadBlocksResponseProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
}
}
}
// @@protoc_insertion_point(builder_scope:ReportBadBlocksResponseProto)
}
static {
defaultInstance = new ReportBadBlocksResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:ReportBadBlocksResponseProto)
}
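// Illustrative usage sketch (not part of the generated code): the response
// message declares no fields, so the shared default instance is normally all
// that is needed. A length-delimited round-trip is sketched below; `out` and
// `in` are assumed java.io.OutputStream / java.io.InputStream instances.
//
//   ReportBadBlocksResponseProto resp = ReportBadBlocksResponseProto.getDefaultInstance();
//   resp.writeDelimitedTo(out);
//   ReportBadBlocksResponseProto echoed =
//       ReportBadBlocksResponseProto.parseDelimitedFrom(in);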
public interface CommitBlockSynchronizationRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .ExtendedBlockProto block = 1;
boolean hasBlock();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder();
// required uint64 newGenStamp = 2;
boolean hasNewGenStamp();
long getNewGenStamp();
// required uint64 newLength = 3;
boolean hasNewLength();
long getNewLength();
// required bool closeFile = 4;
boolean hasCloseFile();
boolean getCloseFile();
// required bool deleteBlock = 5;
boolean hasDeleteBlock();
boolean getDeleteBlock();
// repeated .DatanodeIDProto newTaragets = 6;
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto>
getNewTaragetsList();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getNewTaragets(int index);
int getNewTaragetsCount();
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>
getNewTaragetsOrBuilderList();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getNewTaragetsOrBuilder(
int index);
// repeated string newTargetStorages = 7;
java.util.List<String> getNewTargetStoragesList();
int getNewTargetStoragesCount();
String getNewTargetStorages(int index);
}
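// Illustrative usage sketch (not part of the generated code): populating the
// fields declared in the interface above. `block` (ExtendedBlockProto) and
// `target` (DatanodeIDProto) are assumed instances built elsewhere; the setter
// names follow the standard generated-builder pattern, and the "newTaragets"
// spelling comes from the .proto definition itself.
//
//   CommitBlockSynchronizationRequestProto req =
//       CommitBlockSynchronizationRequestProto.newBuilder()
//           .setBlock(block)
//           .setNewGenStamp(1001L)
//           .setNewLength(4096L)
//           .setCloseFile(true)
//           .setDeleteBlock(false)
//           .addNewTaragets(target)
//           .addNewTargetStorages("storage-id-1")
//           .build();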
public static final class CommitBlockSynchronizationRequestProto extends
com.google.protobuf.GeneratedMessage
implements CommitBlockSynchronizationRequestProtoOrBuilder {
// Use CommitBlockSynchronizationRequestProto.newBuilder() to construct.
private CommitBlockSynchronizationRequestProto(Builder builder) {
super(builder);
}
private CommitBlockSynchronizationRequestProto(boolean noInit) {}
private static final CommitBlockSynchronizationRequestProto defaultInstance;
public static CommitBlockSynchronizationRequestProto getDefaultInstance() {
return defaultInstance;
}
public CommitBlockSynchronizationRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_CommitBlockSynchronizationRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_CommitBlockSynchronizationRequestProto_fieldAccessorTable;
}
private int bitField0_;
// required .ExtendedBlockProto block = 1;
public static final int BLOCK_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_;
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
return block_;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
return block_;
}
// required uint64 newGenStamp = 2;
public static final int NEWGENSTAMP_FIELD_NUMBER = 2;
private long newGenStamp_;
public boolean hasNewGenStamp() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public long getNewGenStamp() {
return newGenStamp_;
}
// required uint64 newLength = 3;
public static final int NEWLENGTH_FIELD_NUMBER = 3;
private long newLength_;
public boolean hasNewLength() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
public long getNewLength() {
return newLength_;
}
// required bool closeFile = 4;
public static final int CLOSEFILE_FIELD_NUMBER = 4;
private boolean closeFile_;
public boolean hasCloseFile() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
public boolean getCloseFile() {
return closeFile_;
}
// required bool deleteBlock = 5;
public static final int DELETEBLOCK_FIELD_NUMBER = 5;
private boolean deleteBlock_;
public boolean hasDeleteBlock() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
public boolean getDeleteBlock() {
return deleteBlock_;
}
// repeated .DatanodeIDProto newTaragets = 6;
public static final int NEWTARAGETS_FIELD_NUMBER = 6;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> newTaragets_;
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> getNewTaragetsList() {
return newTaragets_;
}
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>
getNewTaragetsOrBuilderList() {
return newTaragets_;
}
public int getNewTaragetsCount() {
return newTaragets_.size();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getNewTaragets(int index) {
return newTaragets_.get(index);
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getNewTaragetsOrBuilder(
int index) {
return newTaragets_.get(index);
}
// repeated string newTargetStorages = 7;
public static final int NEWTARGETSTORAGES_FIELD_NUMBER = 7;
private com.google.protobuf.LazyStringList newTargetStorages_;
public java.util.List<String>
getNewTargetStoragesList() {
return newTargetStorages_;
}
public int getNewTargetStoragesCount() {
return newTargetStorages_.size();
}
public String getNewTargetStorages(int index) {
return newTargetStorages_.get(index);
}
private void initFields() {
block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
newGenStamp_ = 0L;
newLength_ = 0L;
closeFile_ = false;
deleteBlock_ = false;
newTaragets_ = java.util.Collections.emptyList();
newTargetStorages_ = com.google.protobuf.LazyStringArrayList.EMPTY;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasBlock()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasNewGenStamp()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasNewLength()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasCloseFile()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasDeleteBlock()) {
memoizedIsInitialized = 0;
return false;
}
if (!getBlock().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getNewTaragetsCount(); i++) {
if (!getNewTaragets(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, block_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, newGenStamp_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt64(3, newLength_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeBool(4, closeFile_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeBool(5, deleteBlock_);
}
for (int i = 0; i < newTaragets_.size(); i++) {
output.writeMessage(6, newTaragets_.get(i));
}
for (int i = 0; i < newTargetStorages_.size(); i++) {
output.writeBytes(7, newTargetStorages_.getByteString(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, block_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, newGenStamp_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, newLength_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(4, closeFile_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(5, deleteBlock_);
}
for (int i = 0; i < newTaragets_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(6, newTaragets_.get(i));
}
{
int dataSize = 0;
for (int i = 0; i < newTargetStorages_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeBytesSizeNoTag(newTargetStorages_.getByteString(i));
}
size += dataSize;
size += 1 * getNewTargetStoragesList().size();
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto) obj;
boolean result = true;
result = result && (hasBlock() == other.hasBlock());
if (hasBlock()) {
result = result && getBlock()
.equals(other.getBlock());
}
result = result && (hasNewGenStamp() == other.hasNewGenStamp());
if (hasNewGenStamp()) {
result = result && (getNewGenStamp()
== other.getNewGenStamp());
}
result = result && (hasNewLength() == other.hasNewLength());
if (hasNewLength()) {
result = result && (getNewLength()
== other.getNewLength());
}
result = result && (hasCloseFile() == other.hasCloseFile());
if (hasCloseFile()) {
result = result && (getCloseFile()
== other.getCloseFile());
}
result = result && (hasDeleteBlock() == other.hasDeleteBlock());
if (hasDeleteBlock()) {
result = result && (getDeleteBlock()
== other.getDeleteBlock());
}
result = result && getNewTaragetsList()
.equals(other.getNewTaragetsList());
result = result && getNewTargetStoragesList()
.equals(other.getNewTargetStoragesList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasBlock()) {
hash = (37 * hash) + BLOCK_FIELD_NUMBER;
hash = (53 * hash) + getBlock().hashCode();
}
if (hasNewGenStamp()) {
hash = (37 * hash) + NEWGENSTAMP_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getNewGenStamp());
}
if (hasNewLength()) {
hash = (37 * hash) + NEWLENGTH_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getNewLength());
}
if (hasCloseFile()) {
hash = (37 * hash) + CLOSEFILE_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getCloseFile());
}
if (hasDeleteBlock()) {
hash = (37 * hash) + DELETEBLOCK_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getDeleteBlock());
}
if (getNewTaragetsCount() > 0) {
hash = (37 * hash) + NEWTARAGETS_FIELD_NUMBER;
hash = (53 * hash) + getNewTaragetsList().hashCode();
}
if (getNewTargetStoragesCount() > 0) {
hash = (37 * hash) + NEWTARGETSTORAGES_FIELD_NUMBER;
hash = (53 * hash) + getNewTargetStoragesList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_CommitBlockSynchronizationRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_CommitBlockSynchronizationRequestProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getBlockFieldBuilder();
getNewTaragetsFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (blockBuilder_ == null) {
block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
newGenStamp_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
newLength_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
closeFile_ = false;
bitField0_ = (bitField0_ & ~0x00000008);
deleteBlock_ = false;
bitField0_ = (bitField0_ & ~0x00000010);
if (newTaragetsBuilder_ == null) {
newTaragets_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000020);
} else {
newTaragetsBuilder_.clear();
}
newTargetStorages_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000040);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (blockBuilder_ == null) {
result.block_ = block_;
} else {
result.block_ = blockBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.newGenStamp_ = newGenStamp_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.newLength_ = newLength_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.closeFile_ = closeFile_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.deleteBlock_ = deleteBlock_;
if (newTaragetsBuilder_ == null) {
if (((bitField0_ & 0x00000020) == 0x00000020)) {
newTaragets_ = java.util.Collections.unmodifiableList(newTaragets_);
bitField0_ = (bitField0_ & ~0x00000020);
}
result.newTaragets_ = newTaragets_;
} else {
result.newTaragets_ = newTaragetsBuilder_.build();
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
newTargetStorages_ = new com.google.protobuf.UnmodifiableLazyStringList(
newTargetStorages_);
bitField0_ = (bitField0_ & ~0x00000040);
}
result.newTargetStorages_ = newTargetStorages_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.getDefaultInstance()) return this;
if (other.hasBlock()) {
mergeBlock(other.getBlock());
}
if (other.hasNewGenStamp()) {
setNewGenStamp(other.getNewGenStamp());
}
if (other.hasNewLength()) {
setNewLength(other.getNewLength());
}
if (other.hasCloseFile()) {
setCloseFile(other.getCloseFile());
}
if (other.hasDeleteBlock()) {
setDeleteBlock(other.getDeleteBlock());
}
if (newTaragetsBuilder_ == null) {
if (!other.newTaragets_.isEmpty()) {
if (newTaragets_.isEmpty()) {
newTaragets_ = other.newTaragets_;
bitField0_ = (bitField0_ & ~0x00000020);
} else {
ensureNewTaragetsIsMutable();
newTaragets_.addAll(other.newTaragets_);
}
onChanged();
}
} else {
if (!other.newTaragets_.isEmpty()) {
if (newTaragetsBuilder_.isEmpty()) {
newTaragetsBuilder_.dispose();
newTaragetsBuilder_ = null;
newTaragets_ = other.newTaragets_;
bitField0_ = (bitField0_ & ~0x00000020);
newTaragetsBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getNewTaragetsFieldBuilder() : null;
} else {
newTaragetsBuilder_.addAllMessages(other.newTaragets_);
}
}
}
if (!other.newTargetStorages_.isEmpty()) {
if (newTargetStorages_.isEmpty()) {
newTargetStorages_ = other.newTargetStorages_;
bitField0_ = (bitField0_ & ~0x00000040);
} else {
ensureNewTargetStoragesIsMutable();
newTargetStorages_.addAll(other.newTargetStorages_);
}
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasBlock()) {
return false;
}
if (!hasNewGenStamp()) {
return false;
}
if (!hasNewLength()) {
return false;
}
if (!hasCloseFile()) {
return false;
}
if (!hasDeleteBlock()) {
return false;
}
if (!getBlock().isInitialized()) {
return false;
}
for (int i = 0; i < getNewTaragetsCount(); i++) {
if (!getNewTaragets(i).isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
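// Protobuf wire tags pack (field_number << 3) | wire_type, which is why the
// cases below are 10 = field 1, length-delimited (block); 16, 24 = fields 2-3,
// varint (newGenStamp, newLength); 32, 40 = fields 4-5, varint (closeFile,
// deleteBlock); 50 = field 6, length-delimited (newTaragets); and
// 58 = field 7, length-delimited (newTargetStorages). A tag of 0 marks
// end of input.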
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder();
if (hasBlock()) {
subBuilder.mergeFrom(getBlock());
}
input.readMessage(subBuilder, extensionRegistry);
setBlock(subBuilder.buildPartial());
break;
}
case 16: {
bitField0_ |= 0x00000002;
newGenStamp_ = input.readUInt64();
break;
}
case 24: {
bitField0_ |= 0x00000004;
newLength_ = input.readUInt64();
break;
}
case 32: {
bitField0_ |= 0x00000008;
closeFile_ = input.readBool();
break;
}
case 40: {
bitField0_ |= 0x00000010;
deleteBlock_ = input.readBool();
break;
}
case 50: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder();
input.readMessage(subBuilder, extensionRegistry);
addNewTaragets(subBuilder.buildPartial());
break;
}
case 58: {
ensureNewTargetStoragesIsMutable();
newTargetStorages_.add(input.readBytes());
break;
}
}
}
}
private int bitField0_;
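// Each bit of bitField0_ is a has-bit for one field, in declaration order:
// 0x01 block, 0x02 newGenStamp, 0x04 newLength, 0x08 closeFile,
// 0x10 deleteBlock, 0x20 newTaragets (list is mutable), 0x40 newTargetStorages.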
// required .ExtendedBlockProto block = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_;
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
if (blockBuilder_ == null) {
return block_;
} else {
return blockBuilder_.getMessage();
}
}
public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
if (blockBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
block_ = value;
onChanged();
} else {
blockBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder setBlock(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) {
if (blockBuilder_ == null) {
block_ = builderForValue.build();
onChanged();
} else {
blockBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
if (blockBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) {
block_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
} else {
block_ = value;
}
onChanged();
} else {
blockBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder clearBlock() {
if (blockBuilder_ == null) {
block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
onChanged();
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getBlockFieldBuilder().getBuilder();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
if (blockBuilder_ != null) {
return blockBuilder_.getMessageOrBuilder();
} else {
return block_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>
getBlockFieldBuilder() {
if (blockBuilder_ == null) {
blockBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>(
block_,
getParentForChildren(),
isClean());
block_ = null;
}
return blockBuilder_;
}
// required uint64 newGenStamp = 2;
private long newGenStamp_ ;
public boolean hasNewGenStamp() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public long getNewGenStamp() {
return newGenStamp_;
}
public Builder setNewGenStamp(long value) {
bitField0_ |= 0x00000002;
newGenStamp_ = value;
onChanged();
return this;
}
public Builder clearNewGenStamp() {
bitField0_ = (bitField0_ & ~0x00000002);
newGenStamp_ = 0L;
onChanged();
return this;
}
// required uint64 newLength = 3;
private long newLength_ ;
public boolean hasNewLength() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
public long getNewLength() {
return newLength_;
}
public Builder setNewLength(long value) {
bitField0_ |= 0x00000004;
newLength_ = value;
onChanged();
return this;
}
public Builder clearNewLength() {
bitField0_ = (bitField0_ & ~0x00000004);
newLength_ = 0L;
onChanged();
return this;
}
// required bool closeFile = 4;
private boolean closeFile_ ;
public boolean hasCloseFile() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
public boolean getCloseFile() {
return closeFile_;
}
public Builder setCloseFile(boolean value) {
bitField0_ |= 0x00000008;
closeFile_ = value;
onChanged();
return this;
}
public Builder clearCloseFile() {
bitField0_ = (bitField0_ & ~0x00000008);
closeFile_ = false;
onChanged();
return this;
}
// required bool deleteBlock = 5;
private boolean deleteBlock_ ;
public boolean hasDeleteBlock() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
public boolean getDeleteBlock() {
return deleteBlock_;
}
public Builder setDeleteBlock(boolean value) {
bitField0_ |= 0x00000010;
deleteBlock_ = value;
onChanged();
return this;
}
public Builder clearDeleteBlock() {
bitField0_ = (bitField0_ & ~0x00000010);
deleteBlock_ = false;
onChanged();
return this;
}
// repeated .DatanodeIDProto newTaragets = 6;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> newTaragets_ =
java.util.Collections.emptyList();
private void ensureNewTaragetsIsMutable() {
if (!((bitField0_ & 0x00000020) == 0x00000020)) {
newTaragets_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto>(newTaragets_);
bitField0_ |= 0x00000020;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> newTaragetsBuilder_;
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> getNewTaragetsList() {
if (newTaragetsBuilder_ == null) {
return java.util.Collections.unmodifiableList(newTaragets_);
} else {
return newTaragetsBuilder_.getMessageList();
}
}
public int getNewTaragetsCount() {
if (newTaragetsBuilder_ == null) {
return newTaragets_.size();
} else {
return newTaragetsBuilder_.getCount();
}
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getNewTaragets(int index) {
if (newTaragetsBuilder_ == null) {
return newTaragets_.get(index);
} else {
return newTaragetsBuilder_.getMessage(index);
}
}
public Builder setNewTaragets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
if (newTaragetsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureNewTaragetsIsMutable();
newTaragets_.set(index, value);
onChanged();
} else {
newTaragetsBuilder_.setMessage(index, value);
}
return this;
}
public Builder setNewTaragets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
if (newTaragetsBuilder_ == null) {
ensureNewTaragetsIsMutable();
newTaragets_.set(index, builderForValue.build());
onChanged();
} else {
newTaragetsBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
public Builder addNewTaragets(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
if (newTaragetsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureNewTaragetsIsMutable();
newTaragets_.add(value);
onChanged();
} else {
newTaragetsBuilder_.addMessage(value);
}
return this;
}
public Builder addNewTaragets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
if (newTaragetsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureNewTaragetsIsMutable();
newTaragets_.add(index, value);
onChanged();
} else {
newTaragetsBuilder_.addMessage(index, value);
}
return this;
}
public Builder addNewTaragets(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
if (newTaragetsBuilder_ == null) {
ensureNewTaragetsIsMutable();
newTaragets_.add(builderForValue.build());
onChanged();
} else {
newTaragetsBuilder_.addMessage(builderForValue.build());
}
return this;
}
public Builder addNewTaragets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
if (newTaragetsBuilder_ == null) {
ensureNewTaragetsIsMutable();
newTaragets_.add(index, builderForValue.build());
onChanged();
} else {
newTaragetsBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
public Builder addAllNewTaragets(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> values) {
if (newTaragetsBuilder_ == null) {
ensureNewTaragetsIsMutable();
super.addAll(values, newTaragets_);
onChanged();
} else {
newTaragetsBuilder_.addAllMessages(values);
}
return this;
}
public Builder clearNewTaragets() {
if (newTaragetsBuilder_ == null) {
newTaragets_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000020);
onChanged();
} else {
newTaragetsBuilder_.clear();
}
return this;
}
public Builder removeNewTaragets(int index) {
if (newTaragetsBuilder_ == null) {
ensureNewTaragetsIsMutable();
newTaragets_.remove(index);
onChanged();
} else {
newTaragetsBuilder_.remove(index);
}
return this;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder getNewTaragetsBuilder(
int index) {
return getNewTaragetsFieldBuilder().getBuilder(index);
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getNewTaragetsOrBuilder(
int index) {
if (newTaragetsBuilder_ == null) {
return newTaragets_.get(index); } else {
return newTaragetsBuilder_.getMessageOrBuilder(index);
}
}
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>
getNewTaragetsOrBuilderList() {
if (newTaragetsBuilder_ != null) {
return newTaragetsBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(newTaragets_);
}
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder addNewTaragetsBuilder() {
return getNewTaragetsFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance());
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder addNewTaragetsBuilder(
int index) {
return getNewTaragetsFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance());
}
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder>
getNewTaragetsBuilderList() {
return getNewTaragetsFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>
getNewTaragetsFieldBuilder() {
if (newTaragetsBuilder_ == null) {
newTaragetsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>(
newTaragets_,
((bitField0_ & 0x00000020) == 0x00000020),
getParentForChildren(),
isClean());
newTaragets_ = null;
}
return newTaragetsBuilder_;
}
// repeated string newTargetStorages = 7;
private com.google.protobuf.LazyStringList newTargetStorages_ = com.google.protobuf.LazyStringArrayList.EMPTY;
private void ensureNewTargetStoragesIsMutable() {
if (!((bitField0_ & 0x00000040) == 0x00000040)) {
newTargetStorages_ = new com.google.protobuf.LazyStringArrayList(newTargetStorages_);
bitField0_ |= 0x00000040;
}
}
public java.util.List<String>
getNewTargetStoragesList() {
return java.util.Collections.unmodifiableList(newTargetStorages_);
}
public int getNewTargetStoragesCount() {
return newTargetStorages_.size();
}
public String getNewTargetStorages(int index) {
return newTargetStorages_.get(index);
}
public Builder setNewTargetStorages(
int index, String value) {
if (value == null) {
throw new NullPointerException();
}
ensureNewTargetStoragesIsMutable();
newTargetStorages_.set(index, value);
onChanged();
return this;
}
public Builder addNewTargetStorages(String value) {
if (value == null) {
throw new NullPointerException();
}
ensureNewTargetStoragesIsMutable();
newTargetStorages_.add(value);
onChanged();
return this;
}
public Builder addAllNewTargetStorages(
java.lang.Iterable<String> values) {
ensureNewTargetStoragesIsMutable();
super.addAll(values, newTargetStorages_);
onChanged();
return this;
}
public Builder clearNewTargetStorages() {
newTargetStorages_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000040);
onChanged();
return this;
}
void addNewTargetStorages(com.google.protobuf.ByteString value) {
ensureNewTargetStoragesIsMutable();
newTargetStorages_.add(value);
onChanged();
}
// @@protoc_insertion_point(builder_scope:CommitBlockSynchronizationRequestProto)
}
static {
defaultInstance = new CommitBlockSynchronizationRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:CommitBlockSynchronizationRequestProto)
}
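// Illustrative sketch, not part of the generated file: a caller would normally
// assemble the request through the fluent builder above. The block and target
// values named here are hypothetical placeholders.
//
//   CommitBlockSynchronizationRequestProto req =
//       CommitBlockSynchronizationRequestProto.newBuilder()
//           .setBlock(recoveredBlock)        // hypothetical ExtendedBlockProto
//           .setNewGenStamp(1001L)
//           .setNewLength(4096L)
//           .setCloseFile(true)
//           .setDeleteBlock(false)
//           .addNewTaragets(targetDatanode)  // hypothetical DatanodeIDProto
//           .addNewTargetStorages("DS-1")    // hypothetical storage ID
//           .build();                        // throws if a required field is unset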
public interface CommitBlockSynchronizationResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
}
public static final class CommitBlockSynchronizationResponseProto extends
com.google.protobuf.GeneratedMessage
implements CommitBlockSynchronizationResponseProtoOrBuilder {
// Use CommitBlockSynchronizationResponseProto.newBuilder() to construct.
private CommitBlockSynchronizationResponseProto(Builder builder) {
super(builder);
}
private CommitBlockSynchronizationResponseProto(boolean noInit) {}
private static final CommitBlockSynchronizationResponseProto defaultInstance;
public static CommitBlockSynchronizationResponseProto getDefaultInstance() {
return defaultInstance;
}
public CommitBlockSynchronizationResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_CommitBlockSynchronizationResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_CommitBlockSynchronizationResponseProto_fieldAccessorTable;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_CommitBlockSynchronizationResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_CommitBlockSynchronizationResponseProto_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDescriptor();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
}
}
}
// @@protoc_insertion_point(builder_scope:CommitBlockSynchronizationResponseProto)
}
static {
defaultInstance = new CommitBlockSynchronizationResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:CommitBlockSynchronizationResponseProto)
}
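// Note: CommitBlockSynchronizationResponseProto declares no fields; it acts as
// a void-style acknowledgement, so equals/hashCode and serialization above
// operate on unknown fields only.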
public static abstract class DatanodeProtocolService
implements com.google.protobuf.Service {
protected DatanodeProtocolService() {}
public interface Interface {
public abstract void registerDatanode(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto> done);
public abstract void sendHeartbeat(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto> done);
public abstract void blockReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto> done);
public abstract void blockReceivedAndDeleted(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto> done);
public abstract void errorReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto> done);
public abstract void versionRequest(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto> done);
public abstract void processUpgrade(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto> done);
public abstract void reportBadBlocks(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto> done);
public abstract void commitBlockSynchronization(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto> done);
}
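// Illustrative sketch, not part of the generated file: a server implements
// Interface and wraps it via newReflectiveService below; the implementation
// body and the RPC server wiring are hypothetical.
//
//   DatanodeProtocolService.Interface impl =
//       new DatanodeProtocolService.Interface() {
//         // ... implement the nine callbacks declared above ...
//       };
//   com.google.protobuf.Service service =
//       DatanodeProtocolService.newReflectiveService(impl);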
public static com.google.protobuf.Service newReflectiveService(
final Interface impl) {
return new DatanodeProtocolService() {
@java.lang.Override
public void registerDatanode(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto> done) {
impl.registerDatanode(controller, request, done);
}
@java.lang.Override
public void sendHeartbeat(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto> done) {
impl.sendHeartbeat(controller, request, done);
}
@java.lang.Override
public void blockReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto> done) {
impl.blockReport(controller, request, done);
}
@java.lang.Override
public void blockReceivedAndDeleted(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto> done) {
impl.blockReceivedAndDeleted(controller, request, done);
}
@java.lang.Override
public void errorReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto> done) {
impl.errorReport(controller, request, done);
}
@java.lang.Override
public void versionRequest(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto> done) {
impl.versionRequest(controller, request, done);
}
@java.lang.Override
public void processUpgrade(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto> done) {
impl.processUpgrade(controller, request, done);
}
@java.lang.Override
public void reportBadBlocks(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto> done) {
impl.reportBadBlocks(controller, request, done);
}
@java.lang.Override
public void commitBlockSynchronization(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto> done) {
impl.commitBlockSynchronization(controller, request, done);
}
};
}
public static com.google.protobuf.BlockingService
newReflectiveBlockingService(final BlockingInterface impl) {
return new com.google.protobuf.BlockingService() {
public final com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptorForType() {
return getDescriptor();
}
public final com.google.protobuf.Message callBlockingMethod(
com.google.protobuf.Descriptors.MethodDescriptor method,
com.google.protobuf.RpcController controller,
com.google.protobuf.Message request)
throws com.google.protobuf.ServiceException {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.callBlockingMethod() given method descriptor for " +
"wrong service type.");
}
switch(method.getIndex()) {
case 0:
return impl.registerDatanode(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto)request);
case 1:
return impl.sendHeartbeat(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto)request);
case 2:
return impl.blockReport(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto)request);
case 3:
return impl.blockReceivedAndDeleted(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto)request);
case 4:
return impl.errorReport(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto)request);
case 5:
return impl.versionRequest(controller, (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto)request);
case 6:
return impl.processUpgrade(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto)request);
case 7:
return impl.reportBadBlocks(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto)request);
case 8:
return impl.commitBlockSynchronization(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto)request);
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getRequestPrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getRequestPrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.getDefaultInstance();
case 1:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
case 2:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.getDefaultInstance();
case 3:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.getDefaultInstance();
case 4:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance();
case 5:
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto.getDefaultInstance();
case 6:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto.getDefaultInstance();
case 7:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.getDefaultInstance();
case 8:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getResponsePrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getResponsePrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance();
case 1:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
case 2:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance();
case 3:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance();
case 4:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance();
case 5:
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.getDefaultInstance();
case 6:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.getDefaultInstance();
case 7:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance();
case 8:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
};
}
public abstract void registerDatanode(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto> done);
public abstract void sendHeartbeat(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto> done);
public abstract void blockReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto> done);
public abstract void blockReceivedAndDeleted(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto> done);
public abstract void errorReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto> done);
public abstract void versionRequest(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto> done);
public abstract void processUpgrade(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto> done);
public abstract void reportBadBlocks(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto> done);
public abstract void commitBlockSynchronization(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto> done);
public static final
com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.getDescriptor().getServices().get(0);
}
public final com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptorForType() {
return getDescriptor();
}
public final void callMethod(
com.google.protobuf.Descriptors.MethodDescriptor method,
com.google.protobuf.RpcController controller,
com.google.protobuf.Message request,
com.google.protobuf.RpcCallback<
com.google.protobuf.Message> done) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.callMethod() given method descriptor for wrong " +
"service type.");
}
switch(method.getIndex()) {
case 0:
this.registerDatanode(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 1:
this.sendHeartbeat(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 2:
this.blockReport(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 3:
this.blockReceivedAndDeleted(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 4:
this.errorReport(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 5:
this.versionRequest(controller, (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 6:
this.processUpgrade(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 7:
this.reportBadBlocks(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 8:
this.commitBlockSynchronization(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getRequestPrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getRequestPrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.getDefaultInstance();
case 1:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
case 2:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.getDefaultInstance();
case 3:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.getDefaultInstance();
case 4:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance();
case 5:
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto.getDefaultInstance();
case 6:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto.getDefaultInstance();
case 7:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.getDefaultInstance();
case 8:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getResponsePrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getResponsePrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance();
case 1:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
case 2:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance();
case 3:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance();
case 4:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance();
case 5:
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.getDefaultInstance();
case 6:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.getDefaultInstance();
case 7:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance();
case 8:
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public static Stub newStub(
com.google.protobuf.RpcChannel channel) {
return new Stub(channel);
}
public static final class Stub extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeProtocolService implements Interface {
private Stub(com.google.protobuf.RpcChannel channel) {
this.channel = channel;
}
private final com.google.protobuf.RpcChannel channel;
public com.google.protobuf.RpcChannel getChannel() {
return channel;
}
public void registerDatanode(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(0),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance()));
}
public void sendHeartbeat(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(1),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance()));
}
public void blockReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(2),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance()));
}
public void blockReceivedAndDeleted(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(3),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance()));
}
public void errorReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(4),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance()));
}
public void versionRequest(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(5),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.getDefaultInstance()));
}
public void processUpgrade(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(6),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.getDefaultInstance()));
}
public void reportBadBlocks(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(7),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance()));
}
public void commitBlockSynchronization(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(8),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDefaultInstance()));
}
}
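// The blocking API below mirrors the asynchronous Stub above: instead of
// delivering the response through an RpcCallback, each method performs a
// synchronous channel.callBlockingMethod(...) and returns the response
// message directly. A minimal, hypothetical usage sketch (the
// BlockingRpcChannel would come from the caller's RPC layer, e.g. Hadoop's
// ProtobufRpcEngine; the method name queryVersion is illustrative only):
//
//   static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto
//       queryVersion(com.google.protobuf.BlockingRpcChannel channel)
//       throws com.google.protobuf.ServiceException {
//     BlockingInterface stub = newBlockingStub(channel);
//     return stub.versionRequest(
//         null, // RpcController; may be null if the channel does not require one
//         org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto
//             .getDefaultInstance());
//   }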
public static BlockingInterface newBlockingStub(
com.google.protobuf.BlockingRpcChannel channel) {
return new BlockingStub(channel);
}
public interface BlockingInterface {
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto registerDatanode(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto sendHeartbeat(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto blockReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto blockReceivedAndDeleted(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto errorReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto versionRequest(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto processUpgrade(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto reportBadBlocks(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto commitBlockSynchronization(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto request)
throws com.google.protobuf.ServiceException;
}
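// In the BlockingStub below, getDescriptor().getMethods().get(n) resolves each
// MethodDescriptor by its declaration order in DatanodeProtocol.proto:
//   0 registerDatanode        3 blockReceivedAndDeleted   6 processUpgrade
//   1 sendHeartbeat           4 errorReport               7 reportBadBlocks
//   2 blockReport             5 versionRequest            8 commitBlockSynchronization
// The asynchronous Stub above uses the same indices, so both stubs stay in
// lockstep with the service descriptor.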
private static final class BlockingStub implements BlockingInterface {
private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
this.channel = channel;
}
private final com.google.protobuf.BlockingRpcChannel channel;
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto registerDatanode(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(0),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance());
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto sendHeartbeat(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(1),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance());
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto blockReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(2),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance());
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto blockReceivedAndDeleted(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(3),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance());
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto errorReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(4),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance());
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto versionRequest(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(5),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.getDefaultInstance());
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto processUpgrade(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(6),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.getDefaultInstance());
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto reportBadBlocks(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(7),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance());
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto commitBlockSynchronization(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(8),
controller,
request,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDefaultInstance());
}
}
}
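// Reflection support for the message types defined in DatanodeProtocol.proto:
// one Descriptors.Descriptor plus one FieldAccessorTable per message. All of
// these fields are assigned when the embedded file descriptor is parsed in the
// static initializer at the bottom of this class; the accessor tables are what
// back GeneratedMessage's reflective hasField/getField/setField machinery.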
private static com.google.protobuf.Descriptors.Descriptor
internal_static_DatanodeRegistrationProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_DatanodeRegistrationProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_DatanodeStorageProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_DatanodeStorageProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_DatanodeCommandProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_DatanodeCommandProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_BalancerBandwidthCommandProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_BalancerBandwidthCommandProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_BlockCommandProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_BlockCommandProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_BlockRecoveryCommandProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_BlockRecoveryCommandProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_FinalizeCommandProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_FinalizeCommandProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_KeyUpdateCommandProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_KeyUpdateCommandProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_RegisterCommandProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_RegisterCommandProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_UpgradeCommandProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_UpgradeCommandProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_RegisterDatanodeRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_RegisterDatanodeRequestProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_RegisterDatanodeResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_RegisterDatanodeResponseProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_HeartbeatRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_HeartbeatRequestProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_StorageReportProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_StorageReportProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_NNHAStatusHeartbeatProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_NNHAStatusHeartbeatProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_HeartbeatResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_HeartbeatResponseProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_BlockReportRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_BlockReportRequestProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_StorageBlockReportProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_StorageBlockReportProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_BlockReportResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_BlockReportResponseProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_ReceivedDeletedBlockInfoProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_ReceivedDeletedBlockInfoProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_StorageReceivedDeletedBlocksProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_StorageReceivedDeletedBlocksProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_BlockReceivedAndDeletedRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_BlockReceivedAndDeletedRequestProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_BlockReceivedAndDeletedResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_BlockReceivedAndDeletedResponseProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_ErrorReportRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_ErrorReportRequestProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_ErrorReportResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_ErrorReportResponseProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_ProcessUpgradeRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_ProcessUpgradeRequestProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_ProcessUpgradeResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_ProcessUpgradeResponseProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_ReportBadBlocksRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_ReportBadBlocksRequestProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_ReportBadBlocksResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_ReportBadBlocksResponseProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_CommitBlockSynchronizationRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_CommitBlockSynchronizationRequestProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_CommitBlockSynchronizationResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_CommitBlockSynchronizationResponseProto_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
return descriptor;
}
private static com.google.protobuf.Descriptors.FileDescriptor
descriptor;
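// The static initializer below embeds the serialized FileDescriptorProto for
// DatanodeProtocol.proto as Latin-1-escaped string literals and parses it once
// at class-load time. A rough sketch of what internalBuildGeneratedFileFrom
// does internally (an approximation, not the verbatim protobuf-java code):
//
//   StringBuilder sb = new StringBuilder();
//   for (String part : descriptorData) sb.append(part);
//   byte[] bytes = sb.toString().getBytes(java.nio.charset.StandardCharsets.ISO_8859_1);
//   com.google.protobuf.DescriptorProtos.FileDescriptorProto fdp =
//       com.google.protobuf.DescriptorProtos.FileDescriptorProto.parseFrom(bytes);
//   descriptor = com.google.protobuf.Descriptors.FileDescriptor.buildFrom(
//       fdp, new com.google.protobuf.Descriptors.FileDescriptor[] {
//           org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor() });
//   // ...after which the assigner callback populates the tables above.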
static {
java.lang.String[] descriptorData = {
"\n\026DatanodeProtocol.proto\032\nhdfs.proto\"\251\001\n" +
"\031DatanodeRegistrationProto\022$\n\ndatanodeID" +
"\030\001 \002(\0132\020.DatanodeIDProto\022&\n\013storageInfo\030" +
"\002 \002(\0132\021.StorageInfoProto\022%\n\004keys\030\003 \002(\0132\027" +
".ExportedBlockKeysProto\022\027\n\017softwareVersi" +
"on\030\004 \002(\t\"\217\001\n\024DatanodeStorageProto\022\021\n\tsto" +
"rageID\030\001 \002(\t\0229\n\005state\030\002 \001(\0162\".DatanodeSt" +
"orageProto.StorageState:\006NORMAL\")\n\014Stora" +
"geState\022\n\n\006NORMAL\020\000\022\r\n\tREAD_ONLY\020\001\"\275\004\n\024D" +
"atanodeCommandProto\022+\n\007cmdType\030\001 \002(\0162\032.D",
"atanodeCommandProto.Type\0223\n\013balancerCmd\030" +
"\002 \001(\0132\036.BalancerBandwidthCommandProto\022\"\n" +
"\006blkCmd\030\003 \001(\0132\022.BlockCommandProto\022/\n\013rec" +
"overyCmd\030\004 \001(\0132\032.BlockRecoveryCommandPro" +
"to\022*\n\013finalizeCmd\030\005 \001(\0132\025.FinalizeComman" +
"dProto\022,\n\014keyUpdateCmd\030\006 \001(\0132\026.KeyUpdate" +
"CommandProto\022*\n\013registerCmd\030\007 \001(\0132\025.Regi" +
"sterCommandProto\022(\n\nupgradeCmd\030\010 \001(\0132\024.U" +
"pgradeCommandProto\"\275\001\n\004Type\022\034\n\030BalancerB" +
"andwidthCommand\020\000\022\020\n\014BlockCommand\020\001\022\030\n\024B",
"lockRecoveryCommand\020\002\022\023\n\017FinalizeCommand" +
"\020\003\022\024\n\020KeyUpdateCommand\020\004\022\023\n\017RegisterComm" +
"and\020\005\022\022\n\016UpgradeCommand\020\006\022\027\n\023NullDatanod" +
"eCommand\020\007\"2\n\035BalancerBandwidthCommandPr" +
"oto\022\021\n\tbandwidth\030\001 \002(\004\"\314\001\n\021BlockCommandP" +
"roto\022)\n\006action\030\001 \002(\0162\031.BlockCommandProto" +
".Action\022\023\n\013blockPoolId\030\002 \002(\t\022\033\n\006blocks\030\003" +
" \003(\0132\013.BlockProto\022$\n\007targets\030\004 \003(\0132\023.Dat" +
"anodeInfosProto\"4\n\006Action\022\014\n\010TRANSFER\020\001\022" +
"\016\n\nINVALIDATE\020\002\022\014\n\010SHUTDOWN\020\003\"B\n\031BlockRe",
"coveryCommandProto\022%\n\006blocks\030\001 \003(\0132\025.Rec" +
"overingBlockProto\"+\n\024FinalizeCommandProt" +
"o\022\023\n\013blockPoolId\030\001 \002(\t\">\n\025KeyUpdateComma" +
"ndProto\022%\n\004keys\030\001 \002(\0132\027.ExportedBlockKey" +
"sProto\"\026\n\024RegisterCommandProto\"\247\001\n\023Upgra" +
"deCommandProto\022+\n\006action\030\001 \002(\0162\033.Upgrade" +
"CommandProto.Action\022\017\n\007version\030\002 \002(\r\022\025\n\r" +
"upgradeStatus\030\003 \002(\r\";\n\006Action\022\013\n\007UNKNOWN" +
"\020\000\022\021\n\rREPORT_STATUS\020d\022\021\n\rSTART_UPGRADE\020e" +
"\"P\n\034RegisterDatanodeRequestProto\0220\n\014regi",
"stration\030\001 \002(\0132\032.DatanodeRegistrationPro" +
"to\"Q\n\035RegisterDatanodeResponseProto\0220\n\014r" +
"egistration\030\001 \002(\0132\032.DatanodeRegistration" +
"Proto\"\276\001\n\025HeartbeatRequestProto\0220\n\014regis" +
"tration\030\001 \002(\0132\032.DatanodeRegistrationProt" +
"o\022$\n\007reports\030\002 \003(\0132\023.StorageReportProto\022" +
"\032\n\017xmitsInProgress\030\003 \001(\r:\0010\022\027\n\014xceiverCo" +
"unt\030\004 \001(\r:\0010\022\030\n\rfailedVolumes\030\005 \001(\r:\0010\"\227" +
"\001\n\022StorageReportProto\022\021\n\tstorageID\030\001 \002(\t" +
"\022\025\n\006failed\030\002 \001(\010:\005false\022\023\n\010capacity\030\003 \001(",
"\004:\0010\022\022\n\007dfsUsed\030\004 \001(\004:\0010\022\024\n\tremaining\030\005 " +
"\001(\004:\0010\022\030\n\rblockPoolUsed\030\006 \001(\004:\0010\"z\n\030NNHA" +
"StatusHeartbeatProto\022.\n\005state\030\001 \002(\0162\037.NN" +
"HAStatusHeartbeatProto.State\022\014\n\004txid\030\002 \002" +
"(\004\" \n\005State\022\n\n\006ACTIVE\020\000\022\013\n\007STANDBY\020\001\"j\n\026" +
"HeartbeatResponseProto\022#\n\004cmds\030\001 \003(\0132\025.D" +
"atanodeCommandProto\022+\n\010haStatus\030\002 \002(\0132\031." +
"NNHAStatusHeartbeatProto\"\213\001\n\027BlockReport" +
"RequestProto\0220\n\014registration\030\001 \002(\0132\032.Dat" +
"anodeRegistrationProto\022\023\n\013blockPoolId\030\002 ",
"\002(\t\022)\n\007reports\030\003 \003(\0132\030.StorageBlockRepor" +
"tProto\"U\n\027StorageBlockReportProto\022&\n\007sto" +
"rage\030\001 \002(\0132\025.DatanodeStorageProto\022\022\n\006blo" +
"cks\030\002 \003(\004B\002\020\001\">\n\030BlockReportResponseProt" +
"o\022\"\n\003cmd\030\001 \001(\0132\025.DatanodeCommandProto\"\304\001" +
"\n\035ReceivedDeletedBlockInfoProto\022\032\n\005block" +
"\030\001 \002(\0132\013.BlockProto\022:\n\006status\030\003 \002(\0162*.Re" +
"ceivedDeletedBlockInfoProto.BlockStatus\022" +
"\022\n\ndeleteHint\030\002 \001(\t\"7\n\013BlockStatus\022\r\n\tRE" +
"CEIVING\020\001\022\014\n\010RECEIVED\020\002\022\013\n\007DELETED\020\003\"f\n!",
"StorageReceivedDeletedBlocksProto\022\021\n\tsto" +
"rageID\030\001 \002(\t\022.\n\006blocks\030\002 \003(\0132\036.ReceivedD" +
"eletedBlockInfoProto\"\240\001\n#BlockReceivedAn" +
"dDeletedRequestProto\0220\n\014registration\030\001 \002" +
"(\0132\032.DatanodeRegistrationProto\022\023\n\013blockP" +
"oolId\030\002 \002(\t\0222\n\006blocks\030\003 \003(\0132\".StorageRec" +
"eivedDeletedBlocksProto\"&\n$BlockReceived" +
"AndDeletedResponseProto\"\275\001\n\027ErrorReportR" +
"equestProto\0220\n\014registartion\030\001 \002(\0132\032.Data" +
"nodeRegistrationProto\022\021\n\terrorCode\030\002 \002(\r",
"\022\013\n\003msg\030\003 \002(\t\"P\n\tErrorCode\022\n\n\006NOTIFY\020\000\022\016" +
"\n\nDISK_ERROR\020\001\022\021\n\rINVALID_BLOCK\020\002\022\024\n\020FAT" +
"AL_DISK_ERROR\020\003\"\032\n\030ErrorReportResponsePr" +
"oto\"?\n\032ProcessUpgradeRequestProto\022!\n\003cmd" +
"\030\001 \001(\0132\024.UpgradeCommandProto\"@\n\033ProcessU" +
"pgradeResponseProto\022!\n\003cmd\030\001 \001(\0132\024.Upgra" +
"deCommandProto\"A\n\033ReportBadBlocksRequest" +
"Proto\022\"\n\006blocks\030\001 \003(\0132\022.LocatedBlockProt" +
"o\"\036\n\034ReportBadBlocksResponseProto\"\336\001\n&Co" +
"mmitBlockSynchronizationRequestProto\022\"\n\005",
"block\030\001 \002(\0132\023.ExtendedBlockProto\022\023\n\013newG" +
"enStamp\030\002 \002(\004\022\021\n\tnewLength\030\003 \002(\004\022\021\n\tclos" +
"eFile\030\004 \002(\010\022\023\n\013deleteBlock\030\005 \002(\010\022%\n\013newT" +
"aragets\030\006 \003(\0132\020.DatanodeIDProto\022\031\n\021newTa" +
"rgetStorages\030\007 \003(\t\")\n\'CommitBlockSynchro" +
"nizationResponseProto2\353\005\n\027DatanodeProtoc" +
"olService\022Q\n\020registerDatanode\022\035.Register" +
"DatanodeRequestProto\032\036.RegisterDatanodeR" +
"esponseProto\022@\n\rsendHeartbeat\022\026.Heartbea" +
"tRequestProto\032\027.HeartbeatResponseProto\022B",
"\n\013blockReport\022\030.BlockReportRequestProto\032" +
"\031.BlockReportResponseProto\022f\n\027blockRecei" +
"vedAndDeleted\022$.BlockReceivedAndDeletedR" +
"equestProto\032%.BlockReceivedAndDeletedRes" +
"ponseProto\022B\n\013errorReport\022\030.ErrorReportR" +
"equestProto\032\031.ErrorReportResponseProto\022=" +
"\n\016versionRequest\022\024.VersionRequestProto\032\025" +
".VersionResponseProto\022K\n\016processUpgrade\022" +
"\033.ProcessUpgradeRequestProto\032\034.ProcessUp" +
"gradeResponseProto\022N\n\017reportBadBlocks\022\034.",
"ReportBadBlocksRequestProto\032\035.ReportBadB" +
"locksResponseProto\022o\n\032commitBlockSynchro" +
"nization\022\'.CommitBlockSynchronizationReq" +
"uestProto\032(.CommitBlockSynchronizationRe" +
"sponseProtoBE\n%org.apache.hadoop.hdfs.pr" +
"otocol.protoB\026DatanodeProtocolProtos\210\001\001\240" +
"\001\001"
};
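// The assigner runs as a callback once the descriptor above has been built and
// linked against its hdfs.proto dependency, so message types can be fetched by
// index. Each FieldAccessorTable pairs a message descriptor with the
// camel-cased field names protoc derived from the .proto source; these must
// match the descriptor bytes exactly, which is why upstream field-name typos
// such as "Registartion" (ErrorReportRequestProto) and "NewTaragets"
// (CommitBlockSynchronizationRequestProto) are preserved verbatim. The method
// returns null because this file registers no protobuf extensions.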
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
public com.google.protobuf.ExtensionRegistry assignDescriptors(
com.google.protobuf.Descriptors.FileDescriptor root) {
descriptor = root;
internal_static_DatanodeRegistrationProto_descriptor =
getDescriptor().getMessageTypes().get(0);
internal_static_DatanodeRegistrationProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_DatanodeRegistrationProto_descriptor,
new java.lang.String[] { "DatanodeID", "StorageInfo", "Keys", "SoftwareVersion", },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder.class);
internal_static_DatanodeStorageProto_descriptor =
getDescriptor().getMessageTypes().get(1);
internal_static_DatanodeStorageProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_DatanodeStorageProto_descriptor,
new java.lang.String[] { "StorageID", "State", },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.Builder.class);
internal_static_DatanodeCommandProto_descriptor =
getDescriptor().getMessageTypes().get(2);
internal_static_DatanodeCommandProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_DatanodeCommandProto_descriptor,
new java.lang.String[] { "CmdType", "BalancerCmd", "BlkCmd", "RecoveryCmd", "FinalizeCmd", "KeyUpdateCmd", "RegisterCmd", "UpgradeCmd", },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder.class);
internal_static_BalancerBandwidthCommandProto_descriptor =
getDescriptor().getMessageTypes().get(3);
internal_static_BalancerBandwidthCommandProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_BalancerBandwidthCommandProto_descriptor,
new java.lang.String[] { "Bandwidth", },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder.class);
internal_static_BlockCommandProto_descriptor =
getDescriptor().getMessageTypes().get(4);
internal_static_BlockCommandProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_BlockCommandProto_descriptor,
new java.lang.String[] { "Action", "BlockPoolId", "Blocks", "Targets", },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder.class);
internal_static_BlockRecoveryCommandProto_descriptor =
getDescriptor().getMessageTypes().get(5);
internal_static_BlockRecoveryCommandProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_BlockRecoveryCommandProto_descriptor,
new java.lang.String[] { "Blocks", },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder.class);
internal_static_FinalizeCommandProto_descriptor =
getDescriptor().getMessageTypes().get(6);
internal_static_FinalizeCommandProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_FinalizeCommandProto_descriptor,
new java.lang.String[] { "BlockPoolId", },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder.class);
internal_static_KeyUpdateCommandProto_descriptor =
getDescriptor().getMessageTypes().get(7);
internal_static_KeyUpdateCommandProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_KeyUpdateCommandProto_descriptor,
new java.lang.String[] { "Keys", },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder.class);
internal_static_RegisterCommandProto_descriptor =
getDescriptor().getMessageTypes().get(8);
internal_static_RegisterCommandProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RegisterCommandProto_descriptor,
new java.lang.String[] { },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder.class);
internal_static_UpgradeCommandProto_descriptor =
getDescriptor().getMessageTypes().get(9);
internal_static_UpgradeCommandProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_UpgradeCommandProto_descriptor,
new java.lang.String[] { "Action", "Version", "UpgradeStatus", },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder.class);
internal_static_RegisterDatanodeRequestProto_descriptor =
getDescriptor().getMessageTypes().get(10);
internal_static_RegisterDatanodeRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RegisterDatanodeRequestProto_descriptor,
new java.lang.String[] { "Registration", },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.Builder.class);
internal_static_RegisterDatanodeResponseProto_descriptor =
getDescriptor().getMessageTypes().get(11);
internal_static_RegisterDatanodeResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RegisterDatanodeResponseProto_descriptor,
new java.lang.String[] { "Registration", },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.Builder.class);
internal_static_HeartbeatRequestProto_descriptor =
getDescriptor().getMessageTypes().get(12);
internal_static_HeartbeatRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_HeartbeatRequestProto_descriptor,
new java.lang.String[] { "Registration", "Reports", "XmitsInProgress", "XceiverCount", "FailedVolumes", },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.Builder.class);
internal_static_StorageReportProto_descriptor =
getDescriptor().getMessageTypes().get(13);
internal_static_StorageReportProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_StorageReportProto_descriptor,
new java.lang.String[] { "StorageID", "Failed", "Capacity", "DfsUsed", "Remaining", "BlockPoolUsed", },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto.Builder.class);
internal_static_NNHAStatusHeartbeatProto_descriptor =
getDescriptor().getMessageTypes().get(14);
internal_static_NNHAStatusHeartbeatProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_NNHAStatusHeartbeatProto_descriptor,
new java.lang.String[] { "State", "Txid", },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto.Builder.class);
internal_static_HeartbeatResponseProto_descriptor =
getDescriptor().getMessageTypes().get(15);
internal_static_HeartbeatResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_HeartbeatResponseProto_descriptor,
new java.lang.String[] { "Cmds", "HaStatus", },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.Builder.class);
internal_static_BlockReportRequestProto_descriptor =
getDescriptor().getMessageTypes().get(16);
internal_static_BlockReportRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_BlockReportRequestProto_descriptor,
new java.lang.String[] { "Registration", "BlockPoolId", "Reports", },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.Builder.class);
internal_static_StorageBlockReportProto_descriptor =
getDescriptor().getMessageTypes().get(17);
internal_static_StorageBlockReportProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_StorageBlockReportProto_descriptor,
new java.lang.String[] { "Storage", "Blocks", },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder.class);
internal_static_BlockReportResponseProto_descriptor =
getDescriptor().getMessageTypes().get(18);
internal_static_BlockReportResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_BlockReportResponseProto_descriptor,
new java.lang.String[] { "Cmd", },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.Builder.class);
internal_static_ReceivedDeletedBlockInfoProto_descriptor =
getDescriptor().getMessageTypes().get(19);
internal_static_ReceivedDeletedBlockInfoProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ReceivedDeletedBlockInfoProto_descriptor,
new java.lang.String[] { "Block", "Status", "DeleteHint", },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder.class);
internal_static_StorageReceivedDeletedBlocksProto_descriptor =
getDescriptor().getMessageTypes().get(20);
internal_static_StorageReceivedDeletedBlocksProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_StorageReceivedDeletedBlocksProto_descriptor,
new java.lang.String[] { "StorageID", "Blocks", },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder.class);
internal_static_BlockReceivedAndDeletedRequestProto_descriptor =
getDescriptor().getMessageTypes().get(21);
internal_static_BlockReceivedAndDeletedRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_BlockReceivedAndDeletedRequestProto_descriptor,
new java.lang.String[] { "Registration", "BlockPoolId", "Blocks", },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.Builder.class);
internal_static_BlockReceivedAndDeletedResponseProto_descriptor =
getDescriptor().getMessageTypes().get(22);
internal_static_BlockReceivedAndDeletedResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_BlockReceivedAndDeletedResponseProto_descriptor,
new java.lang.String[] { },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.Builder.class);
internal_static_ErrorReportRequestProto_descriptor =
getDescriptor().getMessageTypes().get(23);
internal_static_ErrorReportRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ErrorReportRequestProto_descriptor,
new java.lang.String[] { "Registartion", "ErrorCode", "Msg", },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.Builder.class);
internal_static_ErrorReportResponseProto_descriptor =
getDescriptor().getMessageTypes().get(24);
internal_static_ErrorReportResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ErrorReportResponseProto_descriptor,
new java.lang.String[] { },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.Builder.class);
internal_static_ProcessUpgradeRequestProto_descriptor =
getDescriptor().getMessageTypes().get(25);
internal_static_ProcessUpgradeRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ProcessUpgradeRequestProto_descriptor,
new java.lang.String[] { "Cmd", },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto.Builder.class);
internal_static_ProcessUpgradeResponseProto_descriptor =
getDescriptor().getMessageTypes().get(26);
internal_static_ProcessUpgradeResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ProcessUpgradeResponseProto_descriptor,
new java.lang.String[] { "Cmd", },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.Builder.class);
internal_static_ReportBadBlocksRequestProto_descriptor =
getDescriptor().getMessageTypes().get(27);
internal_static_ReportBadBlocksRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ReportBadBlocksRequestProto_descriptor,
new java.lang.String[] { "Blocks", },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.Builder.class);
internal_static_ReportBadBlocksResponseProto_descriptor =
getDescriptor().getMessageTypes().get(28);
internal_static_ReportBadBlocksResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ReportBadBlocksResponseProto_descriptor,
new java.lang.String[] { },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.Builder.class);
internal_static_CommitBlockSynchronizationRequestProto_descriptor =
getDescriptor().getMessageTypes().get(29);
internal_static_CommitBlockSynchronizationRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_CommitBlockSynchronizationRequestProto_descriptor,
new java.lang.String[] { "Block", "NewGenStamp", "NewLength", "CloseFile", "DeleteBlock", "NewTaragets", "NewTargetStorages", },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.Builder.class);
internal_static_CommitBlockSynchronizationResponseProto_descriptor =
getDescriptor().getMessageTypes().get(30);
internal_static_CommitBlockSynchronizationResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_CommitBlockSynchronizationResponseProto_descriptor,
new java.lang.String[] { },
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.class,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.Builder.class);
return null;
}
};
com.google.protobuf.Descriptors.FileDescriptor
.internalBuildGeneratedFileFrom(descriptorData,
new com.google.protobuf.Descriptors.FileDescriptor[] {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
}, assigner);
}
// @@protoc_insertion_point(outer_class_scope)
}