// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: hdfs.proto
package org.apache.hadoop.hdfs.protocol.proto;
public final class HdfsProtos {
private HdfsProtos() {}
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
}
/**
* Protobuf enum {@code hadoop.hdfs.StorageTypeProto}
*
* Types of recognized storage media.
*/
public enum StorageTypeProto
implements com.google.protobuf.ProtocolMessageEnum {
/**
* DISK = 1;
*/
DISK(0, 1),
/**
* SSD = 2;
*/
SSD(1, 2),
/**
* ARCHIVE = 3;
*/
ARCHIVE(2, 3),
/**
* RAM_DISK = 4;
*/
RAM_DISK(3, 4),
;
/**
* DISK = 1;
*/
public static final int DISK_VALUE = 1;
/**
* SSD = 2;
*/
public static final int SSD_VALUE = 2;
/**
* ARCHIVE = 3;
*/
public static final int ARCHIVE_VALUE = 3;
/**
* RAM_DISK = 4;
*/
public static final int RAM_DISK_VALUE = 4;
public final int getNumber() { return value; }
public static StorageTypeProto valueOf(int value) {
switch (value) {
case 1: return DISK;
case 2: return SSD;
case 3: return ARCHIVE;
case 4: return RAM_DISK;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<StorageTypeProto>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<StorageTypeProto>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<StorageTypeProto>() {
public StorageTypeProto findValueByNumber(int number) {
return StorageTypeProto.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(0);
}
private static final StorageTypeProto[] VALUES = values();
public static StorageTypeProto valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private StorageTypeProto(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.StorageTypeProto)
}
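// Editor's sketch (not part of the protoc output): how callers typically move
// between a generated enum constant and its wire number. The helper name is
// hypothetical; run with assertions enabled (-ea) to check the claims.
private static void storageTypeProtoDemo() {
// Wire number -> constant; unknown numbers return null instead of throwing.
StorageTypeProto t = StorageTypeProto.valueOf(2);
assert t == StorageTypeProto.SSD;
assert StorageTypeProto.valueOf(99) == null;
// Constant -> wire number, which matches the generated *_VALUE constant.
assert StorageTypeProto.DISK.getNumber() == StorageTypeProto.DISK_VALUE; // == 1
}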
/**
* Protobuf enum {@code hadoop.hdfs.CipherSuiteProto}
*
* Cipher suite.
*/
public enum CipherSuiteProto
implements com.google.protobuf.ProtocolMessageEnum {
/**
* UNKNOWN = 1;
*/
UNKNOWN(0, 1),
/**
* AES_CTR_NOPADDING = 2;
*/
AES_CTR_NOPADDING(1, 2),
;
/**
* UNKNOWN = 1;
*/
public static final int UNKNOWN_VALUE = 1;
/**
* AES_CTR_NOPADDING = 2;
*/
public static final int AES_CTR_NOPADDING_VALUE = 2;
public final int getNumber() { return value; }
public static CipherSuiteProto valueOf(int value) {
switch (value) {
case 1: return UNKNOWN;
case 2: return AES_CTR_NOPADDING;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<CipherSuiteProto>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<CipherSuiteProto>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<CipherSuiteProto>() {
public CipherSuiteProto findValueByNumber(int number) {
return CipherSuiteProto.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(1);
}
private static final CipherSuiteProto[] VALUES = values();
public static CipherSuiteProto valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private CipherSuiteProto(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.CipherSuiteProto)
}
/**
* Protobuf enum {@code hadoop.hdfs.CryptoProtocolVersionProto}
*
* Crypto protocol version used to access encrypted files.
*/
public enum CryptoProtocolVersionProto
implements com.google.protobuf.ProtocolMessageEnum {
/**
* UNKNOWN_PROTOCOL_VERSION = 1;
*/
UNKNOWN_PROTOCOL_VERSION(0, 1),
/**
* ENCRYPTION_ZONES = 2;
*/
ENCRYPTION_ZONES(1, 2),
;
/**
* UNKNOWN_PROTOCOL_VERSION = 1;
*/
public static final int UNKNOWN_PROTOCOL_VERSION_VALUE = 1;
/**
* ENCRYPTION_ZONES = 2;
*/
public static final int ENCRYPTION_ZONES_VALUE = 2;
public final int getNumber() { return value; }
public static CryptoProtocolVersionProto valueOf(int value) {
switch (value) {
case 1: return UNKNOWN_PROTOCOL_VERSION;
case 2: return ENCRYPTION_ZONES;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<CryptoProtocolVersionProto>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<CryptoProtocolVersionProto>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<CryptoProtocolVersionProto>() {
public CryptoProtocolVersionProto findValueByNumber(int number) {
return CryptoProtocolVersionProto.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(2);
}
private static final CryptoProtocolVersionProto[] VALUES = values();
public static CryptoProtocolVersionProto valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private CryptoProtocolVersionProto(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.CryptoProtocolVersionProto)
}
/**
* Protobuf enum {@code hadoop.hdfs.ChecksumTypeProto}
*
* Checksum algorithms/types used in HDFS.
* Make sure this enum's integer values match the id values of the
* corresponding org.apache.hadoop.util.DataChecksum.Type enum constants.
*/
public enum ChecksumTypeProto
implements com.google.protobuf.ProtocolMessageEnum {
/**
* CHECKSUM_NULL = 0;
*/
CHECKSUM_NULL(0, 0),
/**
* CHECKSUM_CRC32 = 1;
*/
CHECKSUM_CRC32(1, 1),
/**
* CHECKSUM_CRC32C = 2;
*/
CHECKSUM_CRC32C(2, 2),
;
/**
* CHECKSUM_NULL = 0;
*/
public static final int CHECKSUM_NULL_VALUE = 0;
/**
* CHECKSUM_CRC32 = 1;
*/
public static final int CHECKSUM_CRC32_VALUE = 1;
/**
* CHECKSUM_CRC32C = 2;
*/
public static final int CHECKSUM_CRC32C_VALUE = 2;
public final int getNumber() { return value; }
public static ChecksumTypeProto valueOf(int value) {
switch (value) {
case 0: return CHECKSUM_NULL;
case 1: return CHECKSUM_CRC32;
case 2: return CHECKSUM_CRC32C;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<ChecksumTypeProto>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<ChecksumTypeProto>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<ChecksumTypeProto>() {
public ChecksumTypeProto findValueByNumber(int number) {
return ChecksumTypeProto.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(3);
}
private static final ChecksumTypeProto[] VALUES = values();
public static ChecksumTypeProto valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private ChecksumTypeProto(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.ChecksumTypeProto)
}
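// Editor's sketch (not part of the protoc output): the comment on
// ChecksumTypeProto requires its wire numbers to track the id properties of
// org.apache.hadoop.util.DataChecksum.Type (assumed here to be NULL=0,
// CRC32=1, CRC32C=2); this hypothetical helper spells out that contract.
private static void checksumTypeProtoDemo() {
assert ChecksumTypeProto.CHECKSUM_NULL.getNumber() == 0;
assert ChecksumTypeProto.CHECKSUM_CRC32.getNumber() == 1;
assert ChecksumTypeProto.CHECKSUM_CRC32C.getNumber() == 2;
}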
/**
* Protobuf enum {@code hadoop.hdfs.ReplicaStateProto}
*
* State of a block replica at a datanode
*/
public enum ReplicaStateProto
implements com.google.protobuf.ProtocolMessageEnum {
/**
* FINALIZED = 0;
*
*
* State of a replica when it is not modified
*
*/
FINALIZED(0, 0),
/**
* RBW = 1;
*
*
* State of replica that is being written to
*
*/
RBW(1, 1),
/**
* RWR = 2;
*
*
* State of replica that is waiting to be recovered
*
*/
RWR(2, 2),
/**
* RUR = 3;
*
*
* State of replica that is under recovery
*
*/
RUR(3, 3),
/**
* TEMPORARY = 4;
*
*
* State of replica that is created for replication
*
*/
TEMPORARY(4, 4),
;
/**
* FINALIZED = 0;
*
*
* State of a replica when it is not modified
*
*/
public static final int FINALIZED_VALUE = 0;
/**
* RBW = 1;
*
*
* State of replica that is being written to
*
*/
public static final int RBW_VALUE = 1;
/**
* RWR = 2;
*
*
* State of replica that is waiting to be recovered
*
*/
public static final int RWR_VALUE = 2;
/**
* RUR = 3;
*
*
* State of replica that is under recovery
*
*/
public static final int RUR_VALUE = 3;
/**
* TEMPORARY = 4;
*
*
* State of replica that is created for replication
*
*/
public static final int TEMPORARY_VALUE = 4;
public final int getNumber() { return value; }
public static ReplicaStateProto valueOf(int value) {
switch (value) {
case 0: return FINALIZED;
case 1: return RBW;
case 2: return RWR;
case 3: return RUR;
case 4: return TEMPORARY;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<ReplicaStateProto>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<ReplicaStateProto>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<ReplicaStateProto>() {
public ReplicaStateProto findValueByNumber(int number) {
return ReplicaStateProto.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(4);
}
private static final ReplicaStateProto[] VALUES = values();
public static ReplicaStateProto valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private ReplicaStateProto(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.ReplicaStateProto)
}
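// Editor's sketch (not part of the protoc output): besides the int-based
// valueOf, every generated enum can round-trip through its
// EnumValueDescriptor, which is the path reflection-based code uses. The
// helper name is hypothetical.
private static void replicaStateProtoDemo() {
com.google.protobuf.Descriptors.EnumValueDescriptor d =
ReplicaStateProto.RBW.getValueDescriptor();
assert ReplicaStateProto.valueOf(d) == ReplicaStateProto.RBW;
// Passing a descriptor of a different enum type throws IllegalArgumentException.
}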
public interface ExtendedBlockProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required string poolId = 1;
/**
* required string poolId = 1;
*
* Block pool id - globally unique across clusters
*/
boolean hasPoolId();
/**
* required string poolId = 1;
*
* Block pool id - globally unique across clusters
*/
java.lang.String getPoolId();
/**
* required string poolId = 1;
*
* Block pool id - globally unique across clusters
*/
com.google.protobuf.ByteString
getPoolIdBytes();
// required uint64 blockId = 2;
/**
* required uint64 blockId = 2;
*
*
* the local id within a pool
*
*/
boolean hasBlockId();
/**
* required uint64 blockId = 2;
*
*
* the local id within a pool
*
*/
long getBlockId();
// required uint64 generationStamp = 3;
/**
* required uint64 generationStamp = 3;
*/
boolean hasGenerationStamp();
/**
* required uint64 generationStamp = 3;
*/
long getGenerationStamp();
// optional uint64 numBytes = 4 [default = 0];
/**
* optional uint64 numBytes = 4 [default = 0];
*
*
* len does not belong in ebid
*
*/
boolean hasNumBytes();
/**
* optional uint64 numBytes = 4 [default = 0];
*
*
* len does not belong in ebid
*
*/
long getNumBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.ExtendedBlockProto}
*
* Extended block identifies a block
*/
public static final class ExtendedBlockProto extends
com.google.protobuf.GeneratedMessage
implements ExtendedBlockProtoOrBuilder {
// Use ExtendedBlockProto.newBuilder() to construct.
private ExtendedBlockProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ExtendedBlockProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ExtendedBlockProto defaultInstance;
public static ExtendedBlockProto getDefaultInstance() {
return defaultInstance;
}
public ExtendedBlockProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ExtendedBlockProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
poolId_ = input.readBytes();
break;
}
case 16: {
bitField0_ |= 0x00000002;
blockId_ = input.readUInt64();
break;
}
case 24: {
bitField0_ |= 0x00000004;
generationStamp_ = input.readUInt64();
break;
}
case 32: {
bitField0_ |= 0x00000008;
numBytes_ = input.readUInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder.class);
}
public static com.google.protobuf.Parser<ExtendedBlockProto> PARSER =
new com.google.protobuf.AbstractParser<ExtendedBlockProto>() {
public ExtendedBlockProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ExtendedBlockProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<ExtendedBlockProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required string poolId = 1;
public static final int POOLID_FIELD_NUMBER = 1;
private java.lang.Object poolId_;
/**
* required string poolId = 1;
*
* Block pool id - globally unique across clusters
*/
public boolean hasPoolId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string poolId = 1;
*
* Block pool id - globally unique across clusters
*/
public java.lang.String getPoolId() {
java.lang.Object ref = poolId_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
poolId_ = s;
}
return s;
}
}
/**
* required string poolId = 1;
*
* Block pool id - globally unique across clusters
*/
public com.google.protobuf.ByteString
getPoolIdBytes() {
java.lang.Object ref = poolId_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
poolId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required uint64 blockId = 2;
public static final int BLOCKID_FIELD_NUMBER = 2;
private long blockId_;
/**
* required uint64 blockId = 2;
*
*
* the local id within a pool
*
*/
public boolean hasBlockId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 blockId = 2;
*
*
* the local id within a pool
*
*/
public long getBlockId() {
return blockId_;
}
// required uint64 generationStamp = 3;
public static final int GENERATIONSTAMP_FIELD_NUMBER = 3;
private long generationStamp_;
/**
* required uint64 generationStamp = 3;
*/
public boolean hasGenerationStamp() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 generationStamp = 3;
*/
public long getGenerationStamp() {
return generationStamp_;
}
// optional uint64 numBytes = 4 [default = 0];
public static final int NUMBYTES_FIELD_NUMBER = 4;
private long numBytes_;
/**
* optional uint64 numBytes = 4 [default = 0];
*
*
* len does not belong in ebid
*
*/
public boolean hasNumBytes() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional uint64 numBytes = 4 [default = 0];
*
*
* len does not belong in ebid
*
*/
public long getNumBytes() {
return numBytes_;
}
private void initFields() {
poolId_ = "";
blockId_ = 0L;
generationStamp_ = 0L;
numBytes_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasPoolId()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasBlockId()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasGenerationStamp()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getPoolIdBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, blockId_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt64(3, generationStamp_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt64(4, numBytes_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getPoolIdBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, blockId_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, generationStamp_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(4, numBytes_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto) obj;
boolean result = true;
result = result && (hasPoolId() == other.hasPoolId());
if (hasPoolId()) {
result = result && getPoolId()
.equals(other.getPoolId());
}
result = result && (hasBlockId() == other.hasBlockId());
if (hasBlockId()) {
result = result && (getBlockId()
== other.getBlockId());
}
result = result && (hasGenerationStamp() == other.hasGenerationStamp());
if (hasGenerationStamp()) {
result = result && (getGenerationStamp()
== other.getGenerationStamp());
}
result = result && (hasNumBytes() == other.hasNumBytes());
if (hasNumBytes()) {
result = result && (getNumBytes()
== other.getNumBytes());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasPoolId()) {
hash = (37 * hash) + POOLID_FIELD_NUMBER;
hash = (53 * hash) + getPoolId().hashCode();
}
if (hasBlockId()) {
hash = (37 * hash) + BLOCKID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getBlockId());
}
if (hasGenerationStamp()) {
hash = (37 * hash) + GENERATIONSTAMP_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getGenerationStamp());
}
if (hasNumBytes()) {
hash = (37 * hash) + NUMBYTES_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getNumBytes());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ExtendedBlockProto}
*
* Extended block identifies a block
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
poolId_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
blockId_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
generationStamp_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
numBytes_ = 0L;
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.poolId_ = poolId_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.blockId_ = blockId_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.generationStamp_ = generationStamp_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.numBytes_ = numBytes_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) return this;
if (other.hasPoolId()) {
bitField0_ |= 0x00000001;
poolId_ = other.poolId_;
onChanged();
}
if (other.hasBlockId()) {
setBlockId(other.getBlockId());
}
if (other.hasGenerationStamp()) {
setGenerationStamp(other.getGenerationStamp());
}
if (other.hasNumBytes()) {
setNumBytes(other.getNumBytes());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasPoolId()) {
return false;
}
if (!hasBlockId()) {
return false;
}
if (!hasGenerationStamp()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string poolId = 1;
private java.lang.Object poolId_ = "";
/**
* required string poolId = 1;
*
* Block pool id - globally unique across clusters
*/
public boolean hasPoolId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string poolId = 1;
*
* Block pool id - globally unique across clusters
*/
public java.lang.String getPoolId() {
java.lang.Object ref = poolId_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
poolId_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string poolId = 1;
*
* Block pool id - globally unique across clusters
*/
public com.google.protobuf.ByteString
getPoolIdBytes() {
java.lang.Object ref = poolId_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
poolId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string poolId = 1;
*
* Block pool id - globally unique across clusters
*/
public Builder setPoolId(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
poolId_ = value;
onChanged();
return this;
}
/**
* required string poolId = 1;
*
* Block pool id - globally unique across clusters
*/
public Builder clearPoolId() {
bitField0_ = (bitField0_ & ~0x00000001);
poolId_ = getDefaultInstance().getPoolId();
onChanged();
return this;
}
/**
* required string poolId = 1;
*
* Block pool id - globally unique across clusters
*/
public Builder setPoolIdBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
poolId_ = value;
onChanged();
return this;
}
// required uint64 blockId = 2;
private long blockId_ ;
/**
* required uint64 blockId = 2;
*
*
* the local id within a pool
*
*/
public boolean hasBlockId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 blockId = 2;
*
*
* the local id within a pool
*
*/
public long getBlockId() {
return blockId_;
}
/**
* required uint64 blockId = 2;
*
*
* the local id within a pool
*
*/
public Builder setBlockId(long value) {
bitField0_ |= 0x00000002;
blockId_ = value;
onChanged();
return this;
}
/**
* required uint64 blockId = 2;
*
*
* the local id within a pool
*
*/
public Builder clearBlockId() {
bitField0_ = (bitField0_ & ~0x00000002);
blockId_ = 0L;
onChanged();
return this;
}
// required uint64 generationStamp = 3;
private long generationStamp_ ;
/**
* required uint64 generationStamp = 3;
*/
public boolean hasGenerationStamp() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 generationStamp = 3;
*/
public long getGenerationStamp() {
return generationStamp_;
}
/**
* required uint64 generationStamp = 3;
*/
public Builder setGenerationStamp(long value) {
bitField0_ |= 0x00000004;
generationStamp_ = value;
onChanged();
return this;
}
/**
* required uint64 generationStamp = 3;
*/
public Builder clearGenerationStamp() {
bitField0_ = (bitField0_ & ~0x00000004);
generationStamp_ = 0L;
onChanged();
return this;
}
// optional uint64 numBytes = 4 [default = 0];
private long numBytes_ ;
/**
* optional uint64 numBytes = 4 [default = 0];
*
*
* len does not belong in ebid
*
*/
public boolean hasNumBytes() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional uint64 numBytes = 4 [default = 0];
*
*
* len does not belong in ebid
*
*/
public long getNumBytes() {
return numBytes_;
}
/**
* optional uint64 numBytes = 4 [default = 0];
*
*
* len does not belong in ebid
*
*/
public Builder setNumBytes(long value) {
bitField0_ |= 0x00000008;
numBytes_ = value;
onChanged();
return this;
}
/**
* optional uint64 numBytes = 4 [default = 0];
*
*
* len does not belong in ebid
*
*/
public Builder clearNumBytes() {
bitField0_ = (bitField0_ & ~0x00000008);
numBytes_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ExtendedBlockProto)
}
static {
defaultInstance = new ExtendedBlockProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ExtendedBlockProto)
}
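// Editor's sketch (not part of the protoc output): a build/serialize/parse
// round trip for ExtendedBlockProto. The field values are made up for
// illustration; build() throws if any of the required poolId, blockId, or
// generationStamp fields is unset, while numBytes is optional (default 0).
private static ExtendedBlockProto extendedBlockProtoDemo()
throws com.google.protobuf.InvalidProtocolBufferException {
ExtendedBlockProto block = ExtendedBlockProto.newBuilder()
.setPoolId("BP-12345-10.0.0.1-1400000000000") // hypothetical block pool id
.setBlockId(1073741825L)
.setGenerationStamp(1001L)
.build();
byte[] wire = block.toByteArray();
return ExtendedBlockProto.parseFrom(wire);
}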
public interface DatanodeIDProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required string ipAddr = 1;
/**
* required string ipAddr = 1;
*
*
* IP address
*
*/
boolean hasIpAddr();
/**
* required string ipAddr = 1;
*
*
* IP address
*
*/
java.lang.String getIpAddr();
/**
* required string ipAddr = 1;
*
*
* IP address
*
*/
com.google.protobuf.ByteString
getIpAddrBytes();
// required string hostName = 2;
/**
* required string hostName = 2;
*
*
* hostname
*
*/
boolean hasHostName();
/**
* required string hostName = 2;
*
*
* hostname
*
*/
java.lang.String getHostName();
/**
* required string hostName = 2;
*
*
* hostname
*
*/
com.google.protobuf.ByteString
getHostNameBytes();
// required string datanodeUuid = 3;
/**
* required string datanodeUuid = 3;
*
*
* UUID assigned to the Datanode. For
*
*/
boolean hasDatanodeUuid();
/**
* required string datanodeUuid = 3;
*
*
* UUID assigned to the Datanode. For
*
*/
java.lang.String getDatanodeUuid();
/**
* required string datanodeUuid = 3;
*
*
* UUID assigned to the Datanode. For
*
*/
com.google.protobuf.ByteString
getDatanodeUuidBytes();
// required uint32 xferPort = 4;
/**
* required uint32 xferPort = 4;
*
*
* upgraded clusters this is the same
* as the original StorageID of the
* Datanode.
*
*/
boolean hasXferPort();
/**
* required uint32 xferPort = 4;
*
*
* upgraded clusters this is the same
* as the original StorageID of the
* Datanode.
*
*/
int getXferPort();
// required uint32 infoPort = 5;
/**
* required uint32 infoPort = 5;
*
*
* datanode http port
*
*/
boolean hasInfoPort();
/**
* required uint32 infoPort = 5;
*
*
* datanode http port
*
*/
int getInfoPort();
// required uint32 ipcPort = 6;
/**
* required uint32 ipcPort = 6;
*
*
* ipc server port
*
*/
boolean hasIpcPort();
/**
* required uint32 ipcPort = 6;
*
*
* ipc server port
*
*/
int getIpcPort();
// optional uint32 infoSecurePort = 7 [default = 0];
/**
* optional uint32 infoSecurePort = 7 [default = 0];
*
*
* datanode https port
*
*/
boolean hasInfoSecurePort();
/**
* optional uint32 infoSecurePort = 7 [default = 0];
*
*
* datanode https port
*
*/
int getInfoSecurePort();
}
/**
* Protobuf type {@code hadoop.hdfs.DatanodeIDProto}
*
* Identifies a Datanode
*/
public static final class DatanodeIDProto extends
com.google.protobuf.GeneratedMessage
implements DatanodeIDProtoOrBuilder {
// Use DatanodeIDProto.newBuilder() to construct.
private DatanodeIDProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private DatanodeIDProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final DatanodeIDProto defaultInstance;
public static DatanodeIDProto getDefaultInstance() {
return defaultInstance;
}
public DatanodeIDProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DatanodeIDProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
ipAddr_ = input.readBytes();
break;
}
case 18: {
bitField0_ |= 0x00000002;
hostName_ = input.readBytes();
break;
}
case 26: {
bitField0_ |= 0x00000004;
datanodeUuid_ = input.readBytes();
break;
}
case 32: {
bitField0_ |= 0x00000008;
xferPort_ = input.readUInt32();
break;
}
case 40: {
bitField0_ |= 0x00000010;
infoPort_ = input.readUInt32();
break;
}
case 48: {
bitField0_ |= 0x00000020;
ipcPort_ = input.readUInt32();
break;
}
case 56: {
bitField0_ |= 0x00000040;
infoSecurePort_ = input.readUInt32();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder.class);
}
public static com.google.protobuf.Parser<DatanodeIDProto> PARSER =
new com.google.protobuf.AbstractParser<DatanodeIDProto>() {
public DatanodeIDProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new DatanodeIDProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<DatanodeIDProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required string ipAddr = 1;
public static final int IPADDR_FIELD_NUMBER = 1;
private java.lang.Object ipAddr_;
/**
* required string ipAddr = 1;
*
*
* IP address
*
*/
public boolean hasIpAddr() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string ipAddr = 1;
*
*
* IP address
*
*/
public java.lang.String getIpAddr() {
java.lang.Object ref = ipAddr_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
ipAddr_ = s;
}
return s;
}
}
/**
* required string ipAddr = 1;
*
*
* IP address
*
*/
public com.google.protobuf.ByteString
getIpAddrBytes() {
java.lang.Object ref = ipAddr_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
ipAddr_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required string hostName = 2;
public static final int HOSTNAME_FIELD_NUMBER = 2;
private java.lang.Object hostName_;
/**
* required string hostName = 2;
*
*
* hostname
*
*/
public boolean hasHostName() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string hostName = 2;
*
*
* hostname
*
*/
public java.lang.String getHostName() {
java.lang.Object ref = hostName_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
hostName_ = s;
}
return s;
}
}
/**
* required string hostName = 2;
*
*
* hostname
*
*/
public com.google.protobuf.ByteString
getHostNameBytes() {
java.lang.Object ref = hostName_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
hostName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required string datanodeUuid = 3;
public static final int DATANODEUUID_FIELD_NUMBER = 3;
private java.lang.Object datanodeUuid_;
/**
* required string datanodeUuid = 3;
*
*
* UUID assigned to the Datanode. For
*
*/
public boolean hasDatanodeUuid() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required string datanodeUuid = 3;
*
*
* UUID assigned to the Datanode. For
*
*/
public java.lang.String getDatanodeUuid() {
java.lang.Object ref = datanodeUuid_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
datanodeUuid_ = s;
}
return s;
}
}
/**
* required string datanodeUuid = 3;
*
*
* UUID assigned to the Datanode. For
*
*/
public com.google.protobuf.ByteString
getDatanodeUuidBytes() {
java.lang.Object ref = datanodeUuid_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
datanodeUuid_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required uint32 xferPort = 4;
public static final int XFERPORT_FIELD_NUMBER = 4;
private int xferPort_;
/**
* required uint32 xferPort = 4;
*
*
* upgraded clusters this is the same
* as the original StorageID of the
* Datanode.
*
*/
public boolean hasXferPort() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required uint32 xferPort = 4;
*
*
* upgraded clusters this is the same
* as the original StorageID of the
* Datanode.
*
*/
public int getXferPort() {
return xferPort_;
}
// required uint32 infoPort = 5;
public static final int INFOPORT_FIELD_NUMBER = 5;
private int infoPort_;
/**
* required uint32 infoPort = 5;
*
*
* datanode http port
*
*/
public boolean hasInfoPort() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required uint32 infoPort = 5;
*
*
* datanode http port
*
*/
public int getInfoPort() {
return infoPort_;
}
// required uint32 ipcPort = 6;
public static final int IPCPORT_FIELD_NUMBER = 6;
private int ipcPort_;
/**
* required uint32 ipcPort = 6;
*
*
* ipc server port
*
*/
public boolean hasIpcPort() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* required uint32 ipcPort = 6;
*
*
* ipc server port
*
*/
public int getIpcPort() {
return ipcPort_;
}
// optional uint32 infoSecurePort = 7 [default = 0];
public static final int INFOSECUREPORT_FIELD_NUMBER = 7;
private int infoSecurePort_;
/**
* optional uint32 infoSecurePort = 7 [default = 0];
*
*
* datanode https port
*
*/
public boolean hasInfoSecurePort() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional uint32 infoSecurePort = 7 [default = 0];
*
*
* datanode https port
*
*/
public int getInfoSecurePort() {
return infoSecurePort_;
}
private void initFields() {
ipAddr_ = "";
hostName_ = "";
datanodeUuid_ = "";
xferPort_ = 0;
infoPort_ = 0;
ipcPort_ = 0;
infoSecurePort_ = 0;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasIpAddr()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasHostName()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasDatanodeUuid()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasXferPort()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasInfoPort()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasIpcPort()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getIpAddrBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getHostNameBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, getDatanodeUuidBytes());
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt32(4, xferPort_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeUInt32(5, infoPort_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeUInt32(6, ipcPort_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeUInt32(7, infoSecurePort_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getIpAddrBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getHostNameBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, getDatanodeUuidBytes());
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(4, xferPort_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(5, infoPort_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(6, ipcPort_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(7, infoSecurePort_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto) obj;
boolean result = true;
result = result && (hasIpAddr() == other.hasIpAddr());
if (hasIpAddr()) {
result = result && getIpAddr()
.equals(other.getIpAddr());
}
result = result && (hasHostName() == other.hasHostName());
if (hasHostName()) {
result = result && getHostName()
.equals(other.getHostName());
}
result = result && (hasDatanodeUuid() == other.hasDatanodeUuid());
if (hasDatanodeUuid()) {
result = result && getDatanodeUuid()
.equals(other.getDatanodeUuid());
}
result = result && (hasXferPort() == other.hasXferPort());
if (hasXferPort()) {
result = result && (getXferPort()
== other.getXferPort());
}
result = result && (hasInfoPort() == other.hasInfoPort());
if (hasInfoPort()) {
result = result && (getInfoPort()
== other.getInfoPort());
}
result = result && (hasIpcPort() == other.hasIpcPort());
if (hasIpcPort()) {
result = result && (getIpcPort()
== other.getIpcPort());
}
result = result && (hasInfoSecurePort() == other.hasInfoSecurePort());
if (hasInfoSecurePort()) {
result = result && (getInfoSecurePort()
== other.getInfoSecurePort());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasIpAddr()) {
hash = (37 * hash) + IPADDR_FIELD_NUMBER;
hash = (53 * hash) + getIpAddr().hashCode();
}
if (hasHostName()) {
hash = (37 * hash) + HOSTNAME_FIELD_NUMBER;
hash = (53 * hash) + getHostName().hashCode();
}
if (hasDatanodeUuid()) {
hash = (37 * hash) + DATANODEUUID_FIELD_NUMBER;
hash = (53 * hash) + getDatanodeUuid().hashCode();
}
if (hasXferPort()) {
hash = (37 * hash) + XFERPORT_FIELD_NUMBER;
hash = (53 * hash) + getXferPort();
}
if (hasInfoPort()) {
hash = (37 * hash) + INFOPORT_FIELD_NUMBER;
hash = (53 * hash) + getInfoPort();
}
if (hasIpcPort()) {
hash = (37 * hash) + IPCPORT_FIELD_NUMBER;
hash = (53 * hash) + getIpcPort();
}
if (hasInfoSecurePort()) {
hash = (37 * hash) + INFOSECUREPORT_FIELD_NUMBER;
hash = (53 * hash) + getInfoSecurePort();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.DatanodeIDProto}
*
* Identifies a Datanode
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
ipAddr_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
hostName_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
datanodeUuid_ = "";
bitField0_ = (bitField0_ & ~0x00000004);
xferPort_ = 0;
bitField0_ = (bitField0_ & ~0x00000008);
infoPort_ = 0;
bitField0_ = (bitField0_ & ~0x00000010);
ipcPort_ = 0;
bitField0_ = (bitField0_ & ~0x00000020);
infoSecurePort_ = 0;
bitField0_ = (bitField0_ & ~0x00000040);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.ipAddr_ = ipAddr_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.hostName_ = hostName_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.datanodeUuid_ = datanodeUuid_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.xferPort_ = xferPort_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.infoPort_ = infoPort_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000020;
}
result.ipcPort_ = ipcPort_;
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000040;
}
result.infoSecurePort_ = infoSecurePort_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()) return this;
if (other.hasIpAddr()) {
bitField0_ |= 0x00000001;
ipAddr_ = other.ipAddr_;
onChanged();
}
if (other.hasHostName()) {
bitField0_ |= 0x00000002;
hostName_ = other.hostName_;
onChanged();
}
if (other.hasDatanodeUuid()) {
bitField0_ |= 0x00000004;
datanodeUuid_ = other.datanodeUuid_;
onChanged();
}
if (other.hasXferPort()) {
setXferPort(other.getXferPort());
}
if (other.hasInfoPort()) {
setInfoPort(other.getInfoPort());
}
if (other.hasIpcPort()) {
setIpcPort(other.getIpcPort());
}
if (other.hasInfoSecurePort()) {
setInfoSecurePort(other.getInfoSecurePort());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasIpAddr()) {
return false;
}
if (!hasHostName()) {
return false;
}
if (!hasDatanodeUuid()) {
return false;
}
if (!hasXferPort()) {
return false;
}
if (!hasInfoPort()) {
return false;
}
if (!hasIpcPort()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
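// Editorial note: bitField0_ records which of the seven fields below have been
// explicitly set, one bit per field (bit 0 = ipAddr, bit 1 = hostName,
// bit 2 = datanodeUuid, bit 3 = xferPort, bit 4 = infoPort, bit 5 = ipcPort,
// bit 6 = infoSecurePort), matching the masks used in clear() and buildPartial().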
private int bitField0_;
// required string ipAddr = 1;
private java.lang.Object ipAddr_ = "";
/**
* required string ipAddr = 1;
*
* IP address
*/
public boolean hasIpAddr() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string ipAddr = 1;
*
* IP address
*/
public java.lang.String getIpAddr() {
java.lang.Object ref = ipAddr_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
ipAddr_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string ipAddr = 1;
*
* IP address
*/
public com.google.protobuf.ByteString
getIpAddrBytes() {
java.lang.Object ref = ipAddr_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
ipAddr_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string ipAddr = 1;
*
* IP address
*/
public Builder setIpAddr(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
ipAddr_ = value;
onChanged();
return this;
}
/**
* required string ipAddr = 1;
*
* IP address
*/
public Builder clearIpAddr() {
bitField0_ = (bitField0_ & ~0x00000001);
ipAddr_ = getDefaultInstance().getIpAddr();
onChanged();
return this;
}
/**
* required string ipAddr = 1;
*
* IP address
*/
public Builder setIpAddrBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
ipAddr_ = value;
onChanged();
return this;
}
// required string hostName = 2;
private java.lang.Object hostName_ = "";
/**
* required string hostName = 2;
*
* hostname
*/
public boolean hasHostName() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string hostName = 2;
*
* hostname
*/
public java.lang.String getHostName() {
java.lang.Object ref = hostName_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
hostName_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string hostName = 2;
*
* hostname
*/
public com.google.protobuf.ByteString
getHostNameBytes() {
java.lang.Object ref = hostName_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
hostName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string hostName = 2;
*
* hostname
*/
public Builder setHostName(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
hostName_ = value;
onChanged();
return this;
}
/**
* required string hostName = 2;
*
* hostname
*/
public Builder clearHostName() {
bitField0_ = (bitField0_ & ~0x00000002);
hostName_ = getDefaultInstance().getHostName();
onChanged();
return this;
}
/**
* required string hostName = 2;
*
* hostname
*/
public Builder setHostNameBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
hostName_ = value;
onChanged();
return this;
}
// required string datanodeUuid = 3;
private java.lang.Object datanodeUuid_ = "";
/**
* required string datanodeUuid = 3;
*
* UUID assigned to the Datanode. For upgraded clusters this is the
* same as the original StorageID of the Datanode.
*/
public boolean hasDatanodeUuid() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required string datanodeUuid = 3;
*
* UUID assigned to the Datanode. For upgraded clusters this is the
* same as the original StorageID of the Datanode.
*/
public java.lang.String getDatanodeUuid() {
java.lang.Object ref = datanodeUuid_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
datanodeUuid_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string datanodeUuid = 3;
*
* UUID assigned to the Datanode. For upgraded clusters this is the
* same as the original StorageID of the Datanode.
*/
public com.google.protobuf.ByteString
getDatanodeUuidBytes() {
java.lang.Object ref = datanodeUuid_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
datanodeUuid_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string datanodeUuid = 3;
*
* UUID assigned to the Datanode. For upgraded clusters this is the
* same as the original StorageID of the Datanode.
*/
public Builder setDatanodeUuid(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
datanodeUuid_ = value;
onChanged();
return this;
}
/**
* required string datanodeUuid = 3;
*
* UUID assigned to the Datanode. For upgraded clusters this is the
* same as the original StorageID of the Datanode.
*/
public Builder clearDatanodeUuid() {
bitField0_ = (bitField0_ & ~0x00000004);
datanodeUuid_ = getDefaultInstance().getDatanodeUuid();
onChanged();
return this;
}
/**
* required string datanodeUuid = 3;
*
* UUID assigned to the Datanode. For upgraded clusters this is the
* same as the original StorageID of the Datanode.
*/
public Builder setDatanodeUuidBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
datanodeUuid_ = value;
onChanged();
return this;
}
// required uint32 xferPort = 4;
private int xferPort_ ;
/**
* required uint32 xferPort = 4;
*
* data streaming port
*/
public boolean hasXferPort() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required uint32 xferPort = 4;
*
* data streaming port
*/
public int getXferPort() {
return xferPort_;
}
/**
* required uint32 xferPort = 4;
*
* data streaming port
*/
public Builder setXferPort(int value) {
bitField0_ |= 0x00000008;
xferPort_ = value;
onChanged();
return this;
}
/**
* required uint32 xferPort = 4;
*
* data streaming port
*/
public Builder clearXferPort() {
bitField0_ = (bitField0_ & ~0x00000008);
xferPort_ = 0;
onChanged();
return this;
}
// required uint32 infoPort = 5;
private int infoPort_ ;
/**
* required uint32 infoPort = 5;
*
* datanode http port
*/
public boolean hasInfoPort() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required uint32 infoPort = 5;
*
* datanode http port
*/
public int getInfoPort() {
return infoPort_;
}
/**
* required uint32 infoPort = 5;
*
* datanode http port
*/
public Builder setInfoPort(int value) {
bitField0_ |= 0x00000010;
infoPort_ = value;
onChanged();
return this;
}
/**
* required uint32 infoPort = 5;
*
* datanode http port
*/
public Builder clearInfoPort() {
bitField0_ = (bitField0_ & ~0x00000010);
infoPort_ = 0;
onChanged();
return this;
}
// required uint32 ipcPort = 6;
private int ipcPort_ ;
/**
* required uint32 ipcPort = 6;
*
* ipc server port
*/
public boolean hasIpcPort() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* required uint32 ipcPort = 6;
*
* ipc server port
*/
public int getIpcPort() {
return ipcPort_;
}
/**
* required uint32 ipcPort = 6;
*
* ipc server port
*/
public Builder setIpcPort(int value) {
bitField0_ |= 0x00000020;
ipcPort_ = value;
onChanged();
return this;
}
/**
* required uint32 ipcPort = 6;
*
* ipc server port
*/
public Builder clearIpcPort() {
bitField0_ = (bitField0_ & ~0x00000020);
ipcPort_ = 0;
onChanged();
return this;
}
// optional uint32 infoSecurePort = 7 [default = 0];
private int infoSecurePort_ ;
/**
* optional uint32 infoSecurePort = 7 [default = 0];
*
* datanode https port
*/
public boolean hasInfoSecurePort() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional uint32 infoSecurePort = 7 [default = 0];
*
* datanode https port
*/
public int getInfoSecurePort() {
return infoSecurePort_;
}
/**
* optional uint32 infoSecurePort = 7 [default = 0];
*
* datanode https port
*/
public Builder setInfoSecurePort(int value) {
bitField0_ |= 0x00000040;
infoSecurePort_ = value;
onChanged();
return this;
}
/**
* optional uint32 infoSecurePort = 7 [default = 0];
*
* datanode https port
*/
public Builder clearInfoSecurePort() {
bitField0_ = (bitField0_ & ~0x00000040);
infoSecurePort_ = 0;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeIDProto)
}
static {
defaultInstance = new DatanodeIDProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeIDProto)
}
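/*
 * Usage sketch (editorial example, not generated output): constructing a
 * DatanodeIDProto through the Builder above. All six required fields must be
 * set before build() succeeds; the concrete values here are made up.
 *
 *   HdfsProtos.DatanodeIDProto id = HdfsProtos.DatanodeIDProto.newBuilder()
 *       .setIpAddr("10.0.0.1")
 *       .setHostName("dn1.example.com")
 *       .setDatanodeUuid("uuid-1234")   // any UUID string; placeholder value
 *       .setXferPort(50010)
 *       .setInfoPort(50075)
 *       .setIpcPort(50020)
 *       .build();   // throws UninitializedMessageException if a required field is unset
 */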
public interface DatanodeLocalInfoProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required string softwareVersion = 1;
/**
* required string softwareVersion = 1;
*/
boolean hasSoftwareVersion();
/**
* required string softwareVersion = 1;
*/
java.lang.String getSoftwareVersion();
/**
* required string softwareVersion = 1;
*/
com.google.protobuf.ByteString
getSoftwareVersionBytes();
// required string configVersion = 2;
/**
* required string configVersion = 2;
*/
boolean hasConfigVersion();
/**
* required string configVersion = 2;
*/
java.lang.String getConfigVersion();
/**
* required string configVersion = 2;
*/
com.google.protobuf.ByteString
getConfigVersionBytes();
// required uint64 uptime = 3;
/**
* required uint64 uptime = 3;
*/
boolean hasUptime();
/**
* required uint64 uptime = 3;
*/
long getUptime();
}
/**
* Protobuf type {@code hadoop.hdfs.DatanodeLocalInfoProto}
*
* Datanode local information
*/
public static final class DatanodeLocalInfoProto extends
com.google.protobuf.GeneratedMessage
implements DatanodeLocalInfoProtoOrBuilder {
// Use DatanodeLocalInfoProto.newBuilder() to construct.
private DatanodeLocalInfoProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private DatanodeLocalInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final DatanodeLocalInfoProto defaultInstance;
public static DatanodeLocalInfoProto getDefaultInstance() {
return defaultInstance;
}
public DatanodeLocalInfoProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DatanodeLocalInfoProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
softwareVersion_ = input.readBytes();
break;
}
case 18: {
bitField0_ |= 0x00000002;
configVersion_ = input.readBytes();
break;
}
case 24: {
bitField0_ |= 0x00000004;
uptime_ = input.readUInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder.class);
}
public static com.google.protobuf.Parser<DatanodeLocalInfoProto> PARSER =
new com.google.protobuf.AbstractParser<DatanodeLocalInfoProto>() {
public DatanodeLocalInfoProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new DatanodeLocalInfoProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<DatanodeLocalInfoProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required string softwareVersion = 1;
public static final int SOFTWAREVERSION_FIELD_NUMBER = 1;
private java.lang.Object softwareVersion_;
/**
* required string softwareVersion = 1;
*/
public boolean hasSoftwareVersion() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string softwareVersion = 1;
*/
public java.lang.String getSoftwareVersion() {
java.lang.Object ref = softwareVersion_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
softwareVersion_ = s;
}
return s;
}
}
/**
* required string softwareVersion = 1;
*/
public com.google.protobuf.ByteString
getSoftwareVersionBytes() {
java.lang.Object ref = softwareVersion_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
softwareVersion_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required string configVersion = 2;
public static final int CONFIGVERSION_FIELD_NUMBER = 2;
private java.lang.Object configVersion_;
/**
* required string configVersion = 2;
*/
public boolean hasConfigVersion() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string configVersion = 2;
*/
public java.lang.String getConfigVersion() {
java.lang.Object ref = configVersion_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
configVersion_ = s;
}
return s;
}
}
/**
* required string configVersion = 2;
*/
public com.google.protobuf.ByteString
getConfigVersionBytes() {
java.lang.Object ref = configVersion_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
configVersion_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required uint64 uptime = 3;
public static final int UPTIME_FIELD_NUMBER = 3;
private long uptime_;
/**
* required uint64 uptime = 3;
*/
public boolean hasUptime() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 uptime = 3;
*/
public long getUptime() {
return uptime_;
}
private void initFields() {
softwareVersion_ = "";
configVersion_ = "";
uptime_ = 0L;
}
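// Editorial note: caches the result of isInitialized(); -1 = not yet
// computed, 0 = a required field is missing, 1 = all required fields present.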
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasSoftwareVersion()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasConfigVersion()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasUptime()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getSoftwareVersionBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getConfigVersionBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt64(3, uptime_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getSoftwareVersionBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getConfigVersionBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, uptime_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto) obj;
boolean result = true;
result = result && (hasSoftwareVersion() == other.hasSoftwareVersion());
if (hasSoftwareVersion()) {
result = result && getSoftwareVersion()
.equals(other.getSoftwareVersion());
}
result = result && (hasConfigVersion() == other.hasConfigVersion());
if (hasConfigVersion()) {
result = result && getConfigVersion()
.equals(other.getConfigVersion());
}
result = result && (hasUptime() == other.hasUptime());
if (hasUptime()) {
result = result && (getUptime()
== other.getUptime());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasSoftwareVersion()) {
hash = (37 * hash) + SOFTWAREVERSION_FIELD_NUMBER;
hash = (53 * hash) + getSoftwareVersion().hashCode();
}
if (hasConfigVersion()) {
hash = (37 * hash) + CONFIGVERSION_FIELD_NUMBER;
hash = (53 * hash) + getConfigVersion().hashCode();
}
if (hasUptime()) {
hash = (37 * hash) + UPTIME_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getUptime());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.DatanodeLocalInfoProto}
*
* Datanode local information
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
softwareVersion_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
configVersion_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
uptime_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.softwareVersion_ = softwareVersion_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.configVersion_ = configVersion_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.uptime_ = uptime_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.getDefaultInstance()) return this;
if (other.hasSoftwareVersion()) {
bitField0_ |= 0x00000001;
softwareVersion_ = other.softwareVersion_;
onChanged();
}
if (other.hasConfigVersion()) {
bitField0_ |= 0x00000002;
configVersion_ = other.configVersion_;
onChanged();
}
if (other.hasUptime()) {
setUptime(other.getUptime());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasSoftwareVersion()) {
return false;
}
if (!hasConfigVersion()) {
return false;
}
if (!hasUptime()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string softwareVersion = 1;
private java.lang.Object softwareVersion_ = "";
/**
* required string softwareVersion = 1;
*/
public boolean hasSoftwareVersion() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string softwareVersion = 1;
*/
public java.lang.String getSoftwareVersion() {
java.lang.Object ref = softwareVersion_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
softwareVersion_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string softwareVersion = 1;
*/
public com.google.protobuf.ByteString
getSoftwareVersionBytes() {
java.lang.Object ref = softwareVersion_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
softwareVersion_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string softwareVersion = 1;
*/
public Builder setSoftwareVersion(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
softwareVersion_ = value;
onChanged();
return this;
}
/**
* required string softwareVersion = 1;
*/
public Builder clearSoftwareVersion() {
bitField0_ = (bitField0_ & ~0x00000001);
softwareVersion_ = getDefaultInstance().getSoftwareVersion();
onChanged();
return this;
}
/**
* required string softwareVersion = 1;
*/
public Builder setSoftwareVersionBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
softwareVersion_ = value;
onChanged();
return this;
}
// required string configVersion = 2;
private java.lang.Object configVersion_ = "";
/**
* required string configVersion = 2;
*/
public boolean hasConfigVersion() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string configVersion = 2;
*/
public java.lang.String getConfigVersion() {
java.lang.Object ref = configVersion_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
configVersion_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string configVersion = 2;
*/
public com.google.protobuf.ByteString
getConfigVersionBytes() {
java.lang.Object ref = configVersion_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
configVersion_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string configVersion = 2;
*/
public Builder setConfigVersion(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
configVersion_ = value;
onChanged();
return this;
}
/**
* required string configVersion = 2;
*/
public Builder clearConfigVersion() {
bitField0_ = (bitField0_ & ~0x00000002);
configVersion_ = getDefaultInstance().getConfigVersion();
onChanged();
return this;
}
/**
* required string configVersion = 2;
*/
public Builder setConfigVersionBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
configVersion_ = value;
onChanged();
return this;
}
// required uint64 uptime = 3;
private long uptime_ ;
/**
* required uint64 uptime = 3;
*/
public boolean hasUptime() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 uptime = 3;
*/
public long getUptime() {
return uptime_;
}
/**
* required uint64 uptime = 3;
*/
public Builder setUptime(long value) {
bitField0_ |= 0x00000004;
uptime_ = value;
onChanged();
return this;
}
/**
* required uint64 uptime = 3;
*/
public Builder clearUptime() {
bitField0_ = (bitField0_ & ~0x00000004);
uptime_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeLocalInfoProto)
}
static {
defaultInstance = new DatanodeLocalInfoProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeLocalInfoProto)
}
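/*
 * Usage sketch (editorial example, not generated output): a serialize/parse
 * round trip for DatanodeLocalInfoProto using the static parse helpers above.
 * The field values are illustrative only.
 *
 *   HdfsProtos.DatanodeLocalInfoProto info =
 *       HdfsProtos.DatanodeLocalInfoProto.newBuilder()
 *           .setSoftwareVersion("2.7.3")    // required
 *           .setConfigVersion("config-1")   // required; made-up value
 *           .setUptime(3600L)               // required
 *           .build();
 *   HdfsProtos.DatanodeLocalInfoProto copy =
 *       HdfsProtos.DatanodeLocalInfoProto.parseFrom(info.toByteArray());
 */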
public interface DatanodeInfosProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>
getDatanodesList();
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDatanodes(int index);
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
int getDatanodesCount();
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getDatanodesOrBuilderList();
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDatanodesOrBuilder(
int index);
}
/**
* Protobuf type {@code hadoop.hdfs.DatanodeInfosProto}
*
* DatanodeInfo array
*/
public static final class DatanodeInfosProto extends
com.google.protobuf.GeneratedMessage
implements DatanodeInfosProtoOrBuilder {
// Use DatanodeInfosProto.newBuilder() to construct.
private DatanodeInfosProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private DatanodeInfosProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final DatanodeInfosProto defaultInstance;
public static DatanodeInfosProto getDefaultInstance() {
return defaultInstance;
}
public DatanodeInfosProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DatanodeInfosProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
datanodes_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>();
mutable_bitField0_ |= 0x00000001;
}
datanodes_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.PARSER, extensionRegistry));
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
datanodes_ = java.util.Collections.unmodifiableList(datanodes_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder.class);
}
public static com.google.protobuf.Parser<DatanodeInfosProto> PARSER =
new com.google.protobuf.AbstractParser<DatanodeInfosProto>() {
public DatanodeInfosProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new DatanodeInfosProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<DatanodeInfosProto> getParserForType() {
return PARSER;
}
// repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
public static final int DATANODES_FIELD_NUMBER = 1;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> datanodes_;
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getDatanodesList() {
return datanodes_;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getDatanodesOrBuilderList() {
return datanodes_;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public int getDatanodesCount() {
return datanodes_.size();
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDatanodes(int index) {
return datanodes_.get(index);
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDatanodesOrBuilder(
int index) {
return datanodes_.get(index);
}
private void initFields() {
datanodes_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
for (int i = 0; i < getDatanodesCount(); i++) {
if (!getDatanodes(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < datanodes_.size(); i++) {
output.writeMessage(1, datanodes_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
for (int i = 0; i < datanodes_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, datanodes_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto) obj;
boolean result = true;
result = result && getDatanodesList()
.equals(other.getDatanodesList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (getDatanodesCount() > 0) {
hash = (37 * hash) + DATANODES_FIELD_NUMBER;
hash = (53 * hash) + getDatanodesList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.DatanodeInfosProto}
*
* DatanodeInfo array
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getDatanodesFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (datanodesBuilder_ == null) {
datanodes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
} else {
datanodesBuilder_.clear();
}
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto(this);
int from_bitField0_ = bitField0_;
if (datanodesBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001)) {
datanodes_ = java.util.Collections.unmodifiableList(datanodes_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.datanodes_ = datanodes_;
} else {
result.datanodes_ = datanodesBuilder_.build();
}
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance()) return this;
if (datanodesBuilder_ == null) {
if (!other.datanodes_.isEmpty()) {
if (datanodes_.isEmpty()) {
datanodes_ = other.datanodes_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureDatanodesIsMutable();
datanodes_.addAll(other.datanodes_);
}
onChanged();
}
} else {
if (!other.datanodes_.isEmpty()) {
if (datanodesBuilder_.isEmpty()) {
datanodesBuilder_.dispose();
datanodesBuilder_ = null;
datanodes_ = other.datanodes_;
bitField0_ = (bitField0_ & ~0x00000001);
datanodesBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getDatanodesFieldBuilder() : null;
} else {
datanodesBuilder_.addAllMessages(other.datanodes_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
for (int i = 0; i < getDatanodesCount(); i++) {
if (!getDatanodes(i).isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> datanodes_ =
java.util.Collections.emptyList();
private void ensureDatanodesIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
datanodes_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>(datanodes_);
bitField0_ |= 0x00000001;
}
}
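// Editorial note: datanodesBuilder_ is created lazily by
// getDatanodesFieldBuilder(); once it exists it owns the repeated-field state
// and the plain datanodes_ list is set to null.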
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> datanodesBuilder_;
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getDatanodesList() {
if (datanodesBuilder_ == null) {
return java.util.Collections.unmodifiableList(datanodes_);
} else {
return datanodesBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public int getDatanodesCount() {
if (datanodesBuilder_ == null) {
return datanodes_.size();
} else {
return datanodesBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDatanodes(int index) {
if (datanodesBuilder_ == null) {
return datanodes_.get(index);
} else {
return datanodesBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public Builder setDatanodes(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (datanodesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureDatanodesIsMutable();
datanodes_.set(index, value);
onChanged();
} else {
datanodesBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public Builder setDatanodes(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (datanodesBuilder_ == null) {
ensureDatanodesIsMutable();
datanodes_.set(index, builderForValue.build());
onChanged();
} else {
datanodesBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public Builder addDatanodes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (datanodesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureDatanodesIsMutable();
datanodes_.add(value);
onChanged();
} else {
datanodesBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public Builder addDatanodes(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (datanodesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureDatanodesIsMutable();
datanodes_.add(index, value);
onChanged();
} else {
datanodesBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public Builder addDatanodes(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (datanodesBuilder_ == null) {
ensureDatanodesIsMutable();
datanodes_.add(builderForValue.build());
onChanged();
} else {
datanodesBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public Builder addDatanodes(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (datanodesBuilder_ == null) {
ensureDatanodesIsMutable();
datanodes_.add(index, builderForValue.build());
onChanged();
} else {
datanodesBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public Builder addAllDatanodes(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> values) {
if (datanodesBuilder_ == null) {
ensureDatanodesIsMutable();
super.addAll(values, datanodes_);
onChanged();
} else {
datanodesBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public Builder clearDatanodes() {
if (datanodesBuilder_ == null) {
datanodes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
} else {
datanodesBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public Builder removeDatanodes(int index) {
if (datanodesBuilder_ == null) {
ensureDatanodesIsMutable();
datanodes_.remove(index);
onChanged();
} else {
datanodesBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getDatanodesBuilder(
int index) {
return getDatanodesFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDatanodesOrBuilder(
int index) {
if (datanodesBuilder_ == null) {
return datanodes_.get(index); } else {
return datanodesBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getDatanodesOrBuilderList() {
if (datanodesBuilder_ != null) {
return datanodesBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(datanodes_);
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addDatanodesBuilder() {
return getDatanodesFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addDatanodesBuilder(
int index) {
return getDatanodesFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder>
getDatanodesBuilderList() {
return getDatanodesFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getDatanodesFieldBuilder() {
if (datanodesBuilder_ == null) {
datanodesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>(
datanodes_,
((bitField0_ & 0x00000001) == 0x00000001),
getParentForChildren(),
isClean());
datanodes_ = null;
}
return datanodesBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeInfosProto)
}
static {
defaultInstance = new DatanodeInfosProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeInfosProto)
}
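/*
 * Usage sketch (editorial example, not generated output): filling the repeated
 * datanodes field. infoA and infoB stand for already-built DatanodeInfoProto
 * instances.
 *
 *   HdfsProtos.DatanodeInfosProto nodes = HdfsProtos.DatanodeInfosProto.newBuilder()
 *       .addDatanodes(infoA)
 *       .addAllDatanodes(java.util.Arrays.asList(infoB))
 *       .build();
 */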
public interface DatanodeInfoProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.DatanodeIDProto id = 1;
/**
* required .hadoop.hdfs.DatanodeIDProto id = 1;
*/
boolean hasId();
/**
* required .hadoop.hdfs.DatanodeIDProto id = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getId();
/**
* required .hadoop.hdfs.DatanodeIDProto id = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getIdOrBuilder();
// optional uint64 capacity = 2 [default = 0];
/**
* optional uint64 capacity = 2 [default = 0];
*/
boolean hasCapacity();
/**
* optional uint64 capacity = 2 [default = 0];
*/
long getCapacity();
// optional uint64 dfsUsed = 3 [default = 0];
/**
* optional uint64 dfsUsed = 3 [default = 0];
*/
boolean hasDfsUsed();
/**
* optional uint64 dfsUsed = 3 [default = 0];
*/
long getDfsUsed();
// optional uint64 remaining = 4 [default = 0];
/**
* optional uint64 remaining = 4 [default = 0];
*/
boolean hasRemaining();
/**
* optional uint64 remaining = 4 [default = 0];
*/
long getRemaining();
// optional uint64 blockPoolUsed = 5 [default = 0];
/**
* optional uint64 blockPoolUsed = 5 [default = 0];
*/
boolean hasBlockPoolUsed();
/**
* optional uint64 blockPoolUsed = 5 [default = 0];
*/
long getBlockPoolUsed();
// optional uint64 lastUpdate = 6 [default = 0];
/**
* optional uint64 lastUpdate = 6 [default = 0];
*/
boolean hasLastUpdate();
/**
* optional uint64 lastUpdate = 6 [default = 0];
*/
long getLastUpdate();
// optional uint32 xceiverCount = 7 [default = 0];
/**
* optional uint32 xceiverCount = 7 [default = 0];
*/
boolean hasXceiverCount();
/**
* optional uint32 xceiverCount = 7 [default = 0];
*/
int getXceiverCount();
// optional string location = 8;
/**
* optional string location = 8;
*/
boolean hasLocation();
/**
* optional string location = 8;
*/
java.lang.String getLocation();
/**
* optional string location = 8;
*/
com.google.protobuf.ByteString
getLocationBytes();
// optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];
/**
* optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];
*/
boolean hasAdminState();
/**
* optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState getAdminState();
// optional uint64 cacheCapacity = 11 [default = 0];
/**
* optional uint64 cacheCapacity = 11 [default = 0];
*/
boolean hasCacheCapacity();
/**
* optional uint64 cacheCapacity = 11 [default = 0];
*/
long getCacheCapacity();
// optional uint64 cacheUsed = 12 [default = 0];
/**
* optional uint64 cacheUsed = 12 [default = 0];
*/
boolean hasCacheUsed();
/**
* optional uint64 cacheUsed = 12 [default = 0];
*/
long getCacheUsed();
// optional uint64 lastUpdateMonotonic = 13 [default = 0];
/**
* optional uint64 lastUpdateMonotonic = 13 [default = 0];
*/
boolean hasLastUpdateMonotonic();
/**
* optional uint64 lastUpdateMonotonic = 13 [default = 0];
*/
long getLastUpdateMonotonic();
}
/**
* Protobuf type {@code hadoop.hdfs.DatanodeInfoProto}
*
* <pre>
**
* The status of a Datanode
* </pre>
*/
public static final class DatanodeInfoProto extends
com.google.protobuf.GeneratedMessage
implements DatanodeInfoProtoOrBuilder {
// Use DatanodeInfoProto.newBuilder() to construct.
private DatanodeInfoProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private DatanodeInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final DatanodeInfoProto defaultInstance;
public static DatanodeInfoProto getDefaultInstance() {
return defaultInstance;
}
public DatanodeInfoProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DatanodeInfoProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = id_.toBuilder();
}
id_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(id_);
id_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 16: {
bitField0_ |= 0x00000002;
capacity_ = input.readUInt64();
break;
}
case 24: {
bitField0_ |= 0x00000004;
dfsUsed_ = input.readUInt64();
break;
}
case 32: {
bitField0_ |= 0x00000008;
remaining_ = input.readUInt64();
break;
}
case 40: {
bitField0_ |= 0x00000010;
blockPoolUsed_ = input.readUInt64();
break;
}
case 48: {
bitField0_ |= 0x00000020;
lastUpdate_ = input.readUInt64();
break;
}
case 56: {
bitField0_ |= 0x00000040;
xceiverCount_ = input.readUInt32();
break;
}
case 66: {
bitField0_ |= 0x00000080;
location_ = input.readBytes();
break;
}
case 80: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(10, rawValue);
} else {
bitField0_ |= 0x00000100;
adminState_ = value;
}
break;
}
case 88: {
bitField0_ |= 0x00000200;
cacheCapacity_ = input.readUInt64();
break;
}
case 96: {
bitField0_ |= 0x00000400;
cacheUsed_ = input.readUInt64();
break;
}
case 104: {
bitField0_ |= 0x00000800;
lastUpdateMonotonic_ = input.readUInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
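// The case labels in the switch above are raw protobuf wire tags, computed as
// (field_number << 3) | wire_type: case 10 is field 1 (the id message, wire
// type 2, length-delimited), case 16 is field 2 (capacity, wire type 0,
// varint), case 66 is field 8 (location, length-delimited), and case 80 is
// field 10 (adminState, varint).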
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder.class);
}
public static com.google.protobuf.Parser<DatanodeInfoProto> PARSER =
new com.google.protobuf.AbstractParser<DatanodeInfoProto>() {
public DatanodeInfoProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new DatanodeInfoProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<DatanodeInfoProto> getParserForType() {
return PARSER;
}
/**
* Protobuf enum {@code hadoop.hdfs.DatanodeInfoProto.AdminState}
*/
public enum AdminState
implements com.google.protobuf.ProtocolMessageEnum {
/**
* NORMAL = 0;
*/
NORMAL(0, 0),
/**
* DECOMMISSION_INPROGRESS = 1;
*/
DECOMMISSION_INPROGRESS(1, 1),
/**
* DECOMMISSIONED = 2;
*/
DECOMMISSIONED(2, 2),
;
/**
* NORMAL = 0;
*/
public static final int NORMAL_VALUE = 0;
/**
* DECOMMISSION_INPROGRESS = 1;
*/
public static final int DECOMMISSION_INPROGRESS_VALUE = 1;
/**
* DECOMMISSIONED = 2;
*/
public static final int DECOMMISSIONED_VALUE = 2;
public final int getNumber() { return value; }
public static AdminState valueOf(int value) {
switch (value) {
case 0: return NORMAL;
case 1: return DECOMMISSION_INPROGRESS;
case 2: return DECOMMISSIONED;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<AdminState>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<AdminState>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<AdminState>() {
public AdminState findValueByNumber(int number) {
return AdminState.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDescriptor().getEnumTypes().get(0);
}
private static final AdminState[] VALUES = values();
public static AdminState valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private AdminState(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.DatanodeInfoProto.AdminState)
}
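// Usage note: AdminState.valueOf(int) maps a wire value back to the enum and
// returns null for numbers this build does not recognize, so callers should
// null-check rather than expect an exception. A minimal sketch:
//   AdminState s = AdminState.valueOf(1);        // DECOMMISSION_INPROGRESS
//   AdminState unknown = AdminState.valueOf(99); // null, not an exception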
private int bitField0_;
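// bitField0_ packs the has-bits for all twelve fields of this message; each
// hasX() accessor below tests one mask (0x00000001 for id, 0x00000002 for
// capacity, and so on), which is how proto2 field presence is tracked without
// boxing every scalar.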
// required .hadoop.hdfs.DatanodeIDProto id = 1;
public static final int ID_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto id_;
/**
* required .hadoop.hdfs.DatanodeIDProto id = 1;
*/
public boolean hasId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.DatanodeIDProto id = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getId() {
return id_;
}
/**
* required .hadoop.hdfs.DatanodeIDProto id = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getIdOrBuilder() {
return id_;
}
// optional uint64 capacity = 2 [default = 0];
public static final int CAPACITY_FIELD_NUMBER = 2;
private long capacity_;
/**
* optional uint64 capacity = 2 [default = 0];
*/
public boolean hasCapacity() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional uint64 capacity = 2 [default = 0];
*/
public long getCapacity() {
return capacity_;
}
// optional uint64 dfsUsed = 3 [default = 0];
public static final int DFSUSED_FIELD_NUMBER = 3;
private long dfsUsed_;
/**
* optional uint64 dfsUsed = 3 [default = 0];
*/
public boolean hasDfsUsed() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional uint64 dfsUsed = 3 [default = 0];
*/
public long getDfsUsed() {
return dfsUsed_;
}
// optional uint64 remaining = 4 [default = 0];
public static final int REMAINING_FIELD_NUMBER = 4;
private long remaining_;
/**
* optional uint64 remaining = 4 [default = 0];
*/
public boolean hasRemaining() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional uint64 remaining = 4 [default = 0];
*/
public long getRemaining() {
return remaining_;
}
// optional uint64 blockPoolUsed = 5 [default = 0];
public static final int BLOCKPOOLUSED_FIELD_NUMBER = 5;
private long blockPoolUsed_;
/**
* optional uint64 blockPoolUsed = 5 [default = 0];
*/
public boolean hasBlockPoolUsed() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional uint64 blockPoolUsed = 5 [default = 0];
*/
public long getBlockPoolUsed() {
return blockPoolUsed_;
}
// optional uint64 lastUpdate = 6 [default = 0];
public static final int LASTUPDATE_FIELD_NUMBER = 6;
private long lastUpdate_;
/**
* optional uint64 lastUpdate = 6 [default = 0];
*/
public boolean hasLastUpdate() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional uint64 lastUpdate = 6 [default = 0];
*/
public long getLastUpdate() {
return lastUpdate_;
}
// optional uint32 xceiverCount = 7 [default = 0];
public static final int XCEIVERCOUNT_FIELD_NUMBER = 7;
private int xceiverCount_;
/**
* optional uint32 xceiverCount = 7 [default = 0];
*/
public boolean hasXceiverCount() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional uint32 xceiverCount = 7 [default = 0];
*/
public int getXceiverCount() {
return xceiverCount_;
}
// optional string location = 8;
public static final int LOCATION_FIELD_NUMBER = 8;
private java.lang.Object location_;
/**
* optional string location = 8;
*/
public boolean hasLocation() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* optional string location = 8;
*/
public java.lang.String getLocation() {
java.lang.Object ref = location_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
location_ = s;
}
return s;
}
}
/**
* optional string location = 8;
*/
public com.google.protobuf.ByteString
getLocationBytes() {
java.lang.Object ref = location_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
location_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
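// location_ is typed as java.lang.Object so it can hold either a String or a
// ByteString. The two accessors above convert lazily and cache the result:
// getLocation() replaces a valid-UTF-8 ByteString with the decoded String,
// and getLocationBytes() caches the UTF-8 encoding of a String, so repeated
// calls pay the conversion cost only once.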
// optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];
public static final int ADMINSTATE_FIELD_NUMBER = 10;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState adminState_;
/**
* optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];
*/
public boolean hasAdminState() {
return ((bitField0_ & 0x00000100) == 0x00000100);
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState getAdminState() {
return adminState_;
}
// optional uint64 cacheCapacity = 11 [default = 0];
public static final int CACHECAPACITY_FIELD_NUMBER = 11;
private long cacheCapacity_;
/**
* optional uint64 cacheCapacity = 11 [default = 0];
*/
public boolean hasCacheCapacity() {
return ((bitField0_ & 0x00000200) == 0x00000200);
}
/**
* optional uint64 cacheCapacity = 11 [default = 0];
*/
public long getCacheCapacity() {
return cacheCapacity_;
}
// optional uint64 cacheUsed = 12 [default = 0];
public static final int CACHEUSED_FIELD_NUMBER = 12;
private long cacheUsed_;
/**
* optional uint64 cacheUsed = 12 [default = 0];
*/
public boolean hasCacheUsed() {
return ((bitField0_ & 0x00000400) == 0x00000400);
}
/**
* optional uint64 cacheUsed = 12 [default = 0];
*/
public long getCacheUsed() {
return cacheUsed_;
}
// optional uint64 lastUpdateMonotonic = 13 [default = 0];
public static final int LASTUPDATEMONOTONIC_FIELD_NUMBER = 13;
private long lastUpdateMonotonic_;
/**
* optional uint64 lastUpdateMonotonic = 13 [default = 0];
*/
public boolean hasLastUpdateMonotonic() {
return ((bitField0_ & 0x00000800) == 0x00000800);
}
/**
* optional uint64 lastUpdateMonotonic = 13 [default = 0];
*/
public long getLastUpdateMonotonic() {
return lastUpdateMonotonic_;
}
private void initFields() {
id_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance();
capacity_ = 0L;
dfsUsed_ = 0L;
remaining_ = 0L;
blockPoolUsed_ = 0L;
lastUpdate_ = 0L;
xceiverCount_ = 0;
location_ = "";
adminState_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.NORMAL;
cacheCapacity_ = 0L;
cacheUsed_ = 0L;
lastUpdateMonotonic_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasId()) {
memoizedIsInitialized = 0;
return false;
}
if (!getId().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, id_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, capacity_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt64(3, dfsUsed_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt64(4, remaining_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeUInt64(5, blockPoolUsed_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeUInt64(6, lastUpdate_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeUInt32(7, xceiverCount_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
output.writeBytes(8, getLocationBytes());
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
output.writeEnum(10, adminState_.getNumber());
}
if (((bitField0_ & 0x00000200) == 0x00000200)) {
output.writeUInt64(11, cacheCapacity_);
}
if (((bitField0_ & 0x00000400) == 0x00000400)) {
output.writeUInt64(12, cacheUsed_);
}
if (((bitField0_ & 0x00000800) == 0x00000800)) {
output.writeUInt64(13, lastUpdateMonotonic_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, id_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, capacity_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, dfsUsed_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(4, remaining_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(5, blockPoolUsed_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(6, lastUpdate_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(7, xceiverCount_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(8, getLocationBytes());
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(10, adminState_.getNumber());
}
if (((bitField0_ & 0x00000200) == 0x00000200)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(11, cacheCapacity_);
}
if (((bitField0_ & 0x00000400) == 0x00000400)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(12, cacheUsed_);
}
if (((bitField0_ & 0x00000800) == 0x00000800)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(13, lastUpdateMonotonic_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto) obj;
boolean result = true;
result = result && (hasId() == other.hasId());
if (hasId()) {
result = result && getId()
.equals(other.getId());
}
result = result && (hasCapacity() == other.hasCapacity());
if (hasCapacity()) {
result = result && (getCapacity()
== other.getCapacity());
}
result = result && (hasDfsUsed() == other.hasDfsUsed());
if (hasDfsUsed()) {
result = result && (getDfsUsed()
== other.getDfsUsed());
}
result = result && (hasRemaining() == other.hasRemaining());
if (hasRemaining()) {
result = result && (getRemaining()
== other.getRemaining());
}
result = result && (hasBlockPoolUsed() == other.hasBlockPoolUsed());
if (hasBlockPoolUsed()) {
result = result && (getBlockPoolUsed()
== other.getBlockPoolUsed());
}
result = result && (hasLastUpdate() == other.hasLastUpdate());
if (hasLastUpdate()) {
result = result && (getLastUpdate()
== other.getLastUpdate());
}
result = result && (hasXceiverCount() == other.hasXceiverCount());
if (hasXceiverCount()) {
result = result && (getXceiverCount()
== other.getXceiverCount());
}
result = result && (hasLocation() == other.hasLocation());
if (hasLocation()) {
result = result && getLocation()
.equals(other.getLocation());
}
result = result && (hasAdminState() == other.hasAdminState());
if (hasAdminState()) {
result = result &&
(getAdminState() == other.getAdminState());
}
result = result && (hasCacheCapacity() == other.hasCacheCapacity());
if (hasCacheCapacity()) {
result = result && (getCacheCapacity()
== other.getCacheCapacity());
}
result = result && (hasCacheUsed() == other.hasCacheUsed());
if (hasCacheUsed()) {
result = result && (getCacheUsed()
== other.getCacheUsed());
}
result = result && (hasLastUpdateMonotonic() == other.hasLastUpdateMonotonic());
if (hasLastUpdateMonotonic()) {
result = result && (getLastUpdateMonotonic()
== other.getLastUpdateMonotonic());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasId()) {
hash = (37 * hash) + ID_FIELD_NUMBER;
hash = (53 * hash) + getId().hashCode();
}
if (hasCapacity()) {
hash = (37 * hash) + CAPACITY_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getCapacity());
}
if (hasDfsUsed()) {
hash = (37 * hash) + DFSUSED_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getDfsUsed());
}
if (hasRemaining()) {
hash = (37 * hash) + REMAINING_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getRemaining());
}
if (hasBlockPoolUsed()) {
hash = (37 * hash) + BLOCKPOOLUSED_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getBlockPoolUsed());
}
if (hasLastUpdate()) {
hash = (37 * hash) + LASTUPDATE_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getLastUpdate());
}
if (hasXceiverCount()) {
hash = (37 * hash) + XCEIVERCOUNT_FIELD_NUMBER;
hash = (53 * hash) + getXceiverCount();
}
if (hasLocation()) {
hash = (37 * hash) + LOCATION_FIELD_NUMBER;
hash = (53 * hash) + getLocation().hashCode();
}
if (hasAdminState()) {
hash = (37 * hash) + ADMINSTATE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getAdminState());
}
if (hasCacheCapacity()) {
hash = (37 * hash) + CACHECAPACITY_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getCacheCapacity());
}
if (hasCacheUsed()) {
hash = (37 * hash) + CACHEUSED_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getCacheUsed());
}
if (hasLastUpdateMonotonic()) {
hash = (37 * hash) + LASTUPDATEMONOTONIC_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getLastUpdateMonotonic());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
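// hashCode() is memoized in memoizedHashCode (0 meaning "not yet computed")
// and mixes each present field's number and value with the 37/53 multipliers;
// hashLong and hashEnum are helpers defined on protobuf's AbstractMessage
// base class in the 2.x line this file targets.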
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.DatanodeInfoProto}
*
* <pre>
**
* The status of a Datanode
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getIdFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (idBuilder_ == null) {
id_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance();
} else {
idBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
capacity_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
dfsUsed_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
remaining_ = 0L;
bitField0_ = (bitField0_ & ~0x00000008);
blockPoolUsed_ = 0L;
bitField0_ = (bitField0_ & ~0x00000010);
lastUpdate_ = 0L;
bitField0_ = (bitField0_ & ~0x00000020);
xceiverCount_ = 0;
bitField0_ = (bitField0_ & ~0x00000040);
location_ = "";
bitField0_ = (bitField0_ & ~0x00000080);
adminState_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.NORMAL;
bitField0_ = (bitField0_ & ~0x00000100);
cacheCapacity_ = 0L;
bitField0_ = (bitField0_ & ~0x00000200);
cacheUsed_ = 0L;
bitField0_ = (bitField0_ & ~0x00000400);
lastUpdateMonotonic_ = 0L;
bitField0_ = (bitField0_ & ~0x00000800);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (idBuilder_ == null) {
result.id_ = id_;
} else {
result.id_ = idBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.capacity_ = capacity_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.dfsUsed_ = dfsUsed_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.remaining_ = remaining_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.blockPoolUsed_ = blockPoolUsed_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000020;
}
result.lastUpdate_ = lastUpdate_;
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000040;
}
result.xceiverCount_ = xceiverCount_;
if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
to_bitField0_ |= 0x00000080;
}
result.location_ = location_;
if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
to_bitField0_ |= 0x00000100;
}
result.adminState_ = adminState_;
if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
to_bitField0_ |= 0x00000200;
}
result.cacheCapacity_ = cacheCapacity_;
if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
to_bitField0_ |= 0x00000400;
}
result.cacheUsed_ = cacheUsed_;
if (((from_bitField0_ & 0x00000800) == 0x00000800)) {
to_bitField0_ |= 0x00000800;
}
result.lastUpdateMonotonic_ = lastUpdateMonotonic_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()) return this;
if (other.hasId()) {
mergeId(other.getId());
}
if (other.hasCapacity()) {
setCapacity(other.getCapacity());
}
if (other.hasDfsUsed()) {
setDfsUsed(other.getDfsUsed());
}
if (other.hasRemaining()) {
setRemaining(other.getRemaining());
}
if (other.hasBlockPoolUsed()) {
setBlockPoolUsed(other.getBlockPoolUsed());
}
if (other.hasLastUpdate()) {
setLastUpdate(other.getLastUpdate());
}
if (other.hasXceiverCount()) {
setXceiverCount(other.getXceiverCount());
}
if (other.hasLocation()) {
bitField0_ |= 0x00000080;
location_ = other.location_;
onChanged();
}
if (other.hasAdminState()) {
setAdminState(other.getAdminState());
}
if (other.hasCacheCapacity()) {
setCacheCapacity(other.getCacheCapacity());
}
if (other.hasCacheUsed()) {
setCacheUsed(other.getCacheUsed());
}
if (other.hasLastUpdateMonotonic()) {
setLastUpdateMonotonic(other.getLastUpdateMonotonic());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
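// Merge semantics sketch: scalar and enum fields that are set in `other`
// overwrite this builder's values, the required message field id is merged
// recursively via mergeId(), and both sides' unknown fields are concatenated,
// matching standard proto2 mergeFrom behavior.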
public final boolean isInitialized() {
if (!hasId()) {
return false;
}
if (!getId().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.DatanodeIDProto id = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto id_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> idBuilder_;
/**
* required .hadoop.hdfs.DatanodeIDProto id = 1;
*/
public boolean hasId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.DatanodeIDProto id = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getId() {
if (idBuilder_ == null) {
return id_;
} else {
return idBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.DatanodeIDProto id = 1;
*/
public Builder setId(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
if (idBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
id_ = value;
onChanged();
} else {
idBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.DatanodeIDProto id = 1;
*/
public Builder setId(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
if (idBuilder_ == null) {
id_ = builderForValue.build();
onChanged();
} else {
idBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.DatanodeIDProto id = 1;
*/
public Builder mergeId(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
if (idBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
id_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()) {
id_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder(id_).mergeFrom(value).buildPartial();
} else {
id_ = value;
}
onChanged();
} else {
idBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.DatanodeIDProto id = 1;
*/
public Builder clearId() {
if (idBuilder_ == null) {
id_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance();
onChanged();
} else {
idBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.DatanodeIDProto id = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder getIdBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getIdFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.DatanodeIDProto id = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getIdOrBuilder() {
if (idBuilder_ != null) {
return idBuilder_.getMessageOrBuilder();
} else {
return id_;
}
}
/**
* required .hadoop.hdfs.DatanodeIDProto id = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>
getIdFieldBuilder() {
if (idBuilder_ == null) {
idBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>(
id_,
getParentForChildren(),
isClean());
id_ = null;
}
return idBuilder_;
}
// optional uint64 capacity = 2 [default = 0];
private long capacity_ ;
/**
* optional uint64 capacity = 2 [default = 0];
*/
public boolean hasCapacity() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional uint64 capacity = 2 [default = 0];
*/
public long getCapacity() {
return capacity_;
}
/**
* optional uint64 capacity = 2 [default = 0];
*/
public Builder setCapacity(long value) {
bitField0_ |= 0x00000002;
capacity_ = value;
onChanged();
return this;
}
/**
* optional uint64 capacity = 2 [default = 0];
*/
public Builder clearCapacity() {
bitField0_ = (bitField0_ & ~0x00000002);
capacity_ = 0L;
onChanged();
return this;
}
// optional uint64 dfsUsed = 3 [default = 0];
private long dfsUsed_ ;
/**
* optional uint64 dfsUsed = 3 [default = 0];
*/
public boolean hasDfsUsed() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional uint64 dfsUsed = 3 [default = 0];
*/
public long getDfsUsed() {
return dfsUsed_;
}
/**
* optional uint64 dfsUsed = 3 [default = 0];
*/
public Builder setDfsUsed(long value) {
bitField0_ |= 0x00000004;
dfsUsed_ = value;
onChanged();
return this;
}
/**
* optional uint64 dfsUsed = 3 [default = 0];
*/
public Builder clearDfsUsed() {
bitField0_ = (bitField0_ & ~0x00000004);
dfsUsed_ = 0L;
onChanged();
return this;
}
// optional uint64 remaining = 4 [default = 0];
private long remaining_ ;
/**
* optional uint64 remaining = 4 [default = 0];
*/
public boolean hasRemaining() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional uint64 remaining = 4 [default = 0];
*/
public long getRemaining() {
return remaining_;
}
/**
* optional uint64 remaining = 4 [default = 0];
*/
public Builder setRemaining(long value) {
bitField0_ |= 0x00000008;
remaining_ = value;
onChanged();
return this;
}
/**
* optional uint64 remaining = 4 [default = 0];
*/
public Builder clearRemaining() {
bitField0_ = (bitField0_ & ~0x00000008);
remaining_ = 0L;
onChanged();
return this;
}
// optional uint64 blockPoolUsed = 5 [default = 0];
private long blockPoolUsed_ ;
/**
* optional uint64 blockPoolUsed = 5 [default = 0];
*/
public boolean hasBlockPoolUsed() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional uint64 blockPoolUsed = 5 [default = 0];
*/
public long getBlockPoolUsed() {
return blockPoolUsed_;
}
/**
* optional uint64 blockPoolUsed = 5 [default = 0];
*/
public Builder setBlockPoolUsed(long value) {
bitField0_ |= 0x00000010;
blockPoolUsed_ = value;
onChanged();
return this;
}
/**
* optional uint64 blockPoolUsed = 5 [default = 0];
*/
public Builder clearBlockPoolUsed() {
bitField0_ = (bitField0_ & ~0x00000010);
blockPoolUsed_ = 0L;
onChanged();
return this;
}
// optional uint64 lastUpdate = 6 [default = 0];
private long lastUpdate_ ;
/**
* optional uint64 lastUpdate = 6 [default = 0];
*/
public boolean hasLastUpdate() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional uint64 lastUpdate = 6 [default = 0];
*/
public long getLastUpdate() {
return lastUpdate_;
}
/**
* optional uint64 lastUpdate = 6 [default = 0];
*/
public Builder setLastUpdate(long value) {
bitField0_ |= 0x00000020;
lastUpdate_ = value;
onChanged();
return this;
}
/**
* optional uint64 lastUpdate = 6 [default = 0];
*/
public Builder clearLastUpdate() {
bitField0_ = (bitField0_ & ~0x00000020);
lastUpdate_ = 0L;
onChanged();
return this;
}
// optional uint32 xceiverCount = 7 [default = 0];
private int xceiverCount_ ;
/**
* optional uint32 xceiverCount = 7 [default = 0];
*/
public boolean hasXceiverCount() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional uint32 xceiverCount = 7 [default = 0];
*/
public int getXceiverCount() {
return xceiverCount_;
}
/**
* optional uint32 xceiverCount = 7 [default = 0];
*/
public Builder setXceiverCount(int value) {
bitField0_ |= 0x00000040;
xceiverCount_ = value;
onChanged();
return this;
}
/**
* optional uint32 xceiverCount = 7 [default = 0];
*/
public Builder clearXceiverCount() {
bitField0_ = (bitField0_ & ~0x00000040);
xceiverCount_ = 0;
onChanged();
return this;
}
// optional string location = 8;
private java.lang.Object location_ = "";
/**
* optional string location = 8;
*/
public boolean hasLocation() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* optional string location = 8;
*/
public java.lang.String getLocation() {
java.lang.Object ref = location_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
location_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string location = 8;
*/
public com.google.protobuf.ByteString
getLocationBytes() {
java.lang.Object ref = location_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
location_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* optional string location = 8;
*/
public Builder setLocation(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000080;
location_ = value;
onChanged();
return this;
}
/**
* optional string location = 8;
*/
public Builder clearLocation() {
bitField0_ = (bitField0_ & ~0x00000080);
location_ = getDefaultInstance().getLocation();
onChanged();
return this;
}
/**
* optional string location = 8;
*/
public Builder setLocationBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000080;
location_ = value;
onChanged();
return this;
}
// optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState adminState_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.NORMAL;
/**
* optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];
*/
public boolean hasAdminState() {
return ((bitField0_ & 0x00000100) == 0x00000100);
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState getAdminState() {
return adminState_;
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];
*/
public Builder setAdminState(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000100;
adminState_ = value;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];
*/
public Builder clearAdminState() {
bitField0_ = (bitField0_ & ~0x00000100);
adminState_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.NORMAL;
onChanged();
return this;
}
// optional uint64 cacheCapacity = 11 [default = 0];
private long cacheCapacity_ ;
/**
* optional uint64 cacheCapacity = 11 [default = 0];
*/
public boolean hasCacheCapacity() {
return ((bitField0_ & 0x00000200) == 0x00000200);
}
/**
* optional uint64 cacheCapacity = 11 [default = 0];
*/
public long getCacheCapacity() {
return cacheCapacity_;
}
/**
* optional uint64 cacheCapacity = 11 [default = 0];
*/
public Builder setCacheCapacity(long value) {
bitField0_ |= 0x00000200;
cacheCapacity_ = value;
onChanged();
return this;
}
/**
* optional uint64 cacheCapacity = 11 [default = 0];
*/
public Builder clearCacheCapacity() {
bitField0_ = (bitField0_ & ~0x00000200);
cacheCapacity_ = 0L;
onChanged();
return this;
}
// optional uint64 cacheUsed = 12 [default = 0];
private long cacheUsed_ ;
/**
* optional uint64 cacheUsed = 12 [default = 0];
*/
public boolean hasCacheUsed() {
return ((bitField0_ & 0x00000400) == 0x00000400);
}
/**
* optional uint64 cacheUsed = 12 [default = 0];
*/
public long getCacheUsed() {
return cacheUsed_;
}
/**
* optional uint64 cacheUsed = 12 [default = 0];
*/
public Builder setCacheUsed(long value) {
bitField0_ |= 0x00000400;
cacheUsed_ = value;
onChanged();
return this;
}
/**
* optional uint64 cacheUsed = 12 [default = 0];
*/
public Builder clearCacheUsed() {
bitField0_ = (bitField0_ & ~0x00000400);
cacheUsed_ = 0L;
onChanged();
return this;
}
// optional uint64 lastUpdateMonotonic = 13 [default = 0];
private long lastUpdateMonotonic_ ;
/**
* optional uint64 lastUpdateMonotonic = 13 [default = 0];
*/
public boolean hasLastUpdateMonotonic() {
return ((bitField0_ & 0x00000800) == 0x00000800);
}
/**
* optional uint64 lastUpdateMonotonic = 13 [default = 0];
*/
public long getLastUpdateMonotonic() {
return lastUpdateMonotonic_;
}
/**
* optional uint64 lastUpdateMonotonic = 13 [default = 0];
*/
public Builder setLastUpdateMonotonic(long value) {
bitField0_ |= 0x00000800;
lastUpdateMonotonic_ = value;
onChanged();
return this;
}
/**
* optional uint64 lastUpdateMonotonic = 13 [default = 0];
*/
public Builder clearLastUpdateMonotonic() {
bitField0_ = (bitField0_ & ~0x00000800);
lastUpdateMonotonic_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeInfoProto)
}
static {
defaultInstance = new DatanodeInfoProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeInfoProto)
}
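// A minimal round-trip sketch for this message (values are hypothetical and
// someDatanodeId stands in for a separately built DatanodeIDProto):
//
//   HdfsProtos.DatanodeInfoProto info = HdfsProtos.DatanodeInfoProto.newBuilder()
//       .setId(someDatanodeId)              // required; build() throws if unset
//       .setCapacity(1024L * 1024L * 1024L) // optional uint64, defaults to 0
//       .setAdminState(HdfsProtos.DatanodeInfoProto.AdminState.NORMAL)
//       .build();
//   byte[] bytes = info.toByteArray();
//   HdfsProtos.DatanodeInfoProto parsed =
//       HdfsProtos.DatanodeInfoProto.parseFrom(bytes);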
public interface DatanodeStorageProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required string storageUuid = 1;
/**
* required string storageUuid = 1;
*/
boolean hasStorageUuid();
/**
* required string storageUuid = 1;
*/
java.lang.String getStorageUuid();
/**
* required string storageUuid = 1;
*/
com.google.protobuf.ByteString
getStorageUuidBytes();
// optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];
/**
* optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];
*/
boolean hasState();
/**
* optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState getState();
// optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];
*/
boolean hasStorageType();
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType();
}
/**
* Protobuf type {@code hadoop.hdfs.DatanodeStorageProto}
*
* <pre>
**
* Represents a storage available on the datanode
* </pre>
*/
public static final class DatanodeStorageProto extends
com.google.protobuf.GeneratedMessage
implements DatanodeStorageProtoOrBuilder {
// Use DatanodeStorageProto.newBuilder() to construct.
private DatanodeStorageProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private DatanodeStorageProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final DatanodeStorageProto defaultInstance;
public static DatanodeStorageProto getDefaultInstance() {
return defaultInstance;
}
public DatanodeStorageProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DatanodeStorageProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
storageUuid_ = input.readBytes();
break;
}
case 16: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(2, rawValue);
} else {
bitField0_ |= 0x00000002;
state_ = value;
}
break;
}
case 24: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(3, rawValue);
} else {
bitField0_ |= 0x00000004;
storageType_ = value;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
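// As in DatanodeInfoProto, unrecognized enum numbers read off the wire (cases
// 16 and 24 above) are not silently dropped: valueOf() returns null and the
// raw varint is preserved in unknownFields via mergeVarintField(), so values
// from a newer peer survive re-serialization by this build.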
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder.class);
}
public static com.google.protobuf.Parser<DatanodeStorageProto> PARSER =
new com.google.protobuf.AbstractParser<DatanodeStorageProto>() {
public DatanodeStorageProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new DatanodeStorageProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<DatanodeStorageProto> getParserForType() {
return PARSER;
}
/**
* Protobuf enum {@code hadoop.hdfs.DatanodeStorageProto.StorageState}
*/
public enum StorageState
implements com.google.protobuf.ProtocolMessageEnum {
/**
* NORMAL = 0;
*/
NORMAL(0, 0),
/**
* READ_ONLY_SHARED = 1;
*/
READ_ONLY_SHARED(1, 1),
;
/**
* NORMAL = 0;
*/
public static final int NORMAL_VALUE = 0;
/**
* READ_ONLY_SHARED = 1;
*/
public static final int READ_ONLY_SHARED_VALUE = 1;
public final int getNumber() { return value; }
public static StorageState valueOf(int value) {
switch (value) {
case 0: return NORMAL;
case 1: return READ_ONLY_SHARED;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<StorageState>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<StorageState>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<StorageState>() {
public StorageState findValueByNumber(int number) {
return StorageState.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDescriptor().getEnumTypes().get(0);
}
private static final StorageState[] VALUES = values();
public static StorageState valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private StorageState(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.DatanodeStorageProto.StorageState)
}
private int bitField0_;
// required string storageUuid = 1;
public static final int STORAGEUUID_FIELD_NUMBER = 1;
private java.lang.Object storageUuid_;
/**
* required string storageUuid = 1;
*/
public boolean hasStorageUuid() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string storageUuid = 1;
*/
public java.lang.String getStorageUuid() {
java.lang.Object ref = storageUuid_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
storageUuid_ = s;
}
return s;
}
}
/**
* required string storageUuid = 1;
*/
public com.google.protobuf.ByteString
getStorageUuidBytes() {
java.lang.Object ref = storageUuid_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
storageUuid_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];
public static final int STATE_FIELD_NUMBER = 2;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState state_;
/**
* optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];
*/
public boolean hasState() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState getState() {
return state_;
}
// optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];
public static final int STORAGETYPE_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto storageType_;
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];
*/
public boolean hasStorageType() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
return storageType_;
}
private void initFields() {
storageUuid_ = "";
state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.NORMAL;
storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasStorageUuid()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getStorageUuidBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeEnum(2, state_.getNumber());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeEnum(3, storageType_.getNumber());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getStorageUuidBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(2, state_.getNumber());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(3, storageType_.getNumber());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto) obj;
boolean result = true;
result = result && (hasStorageUuid() == other.hasStorageUuid());
if (hasStorageUuid()) {
result = result && getStorageUuid()
.equals(other.getStorageUuid());
}
result = result && (hasState() == other.hasState());
if (hasState()) {
result = result &&
(getState() == other.getState());
}
result = result && (hasStorageType() == other.hasStorageType());
if (hasStorageType()) {
result = result &&
(getStorageType() == other.getStorageType());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasStorageUuid()) {
hash = (37 * hash) + STORAGEUUID_FIELD_NUMBER;
hash = (53 * hash) + getStorageUuid().hashCode();
}
if (hasState()) {
hash = (37 * hash) + STATE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getState());
}
if (hasStorageType()) {
hash = (37 * hash) + STORAGETYPE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getStorageType());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
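// Editorial note (not generated code): every parseFrom() overload above
// delegates to PARSER; the parseDelimitedFrom() variants additionally expect a
// leading varint length, so several messages can share one stream. A minimal
// round trip, assuming `bytes` holds a serialized message:
//
//   DatanodeStorageProto p = DatanodeStorageProto.parseFrom(bytes);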
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.DatanodeStorageProto}
*
* <pre>
* Represents a storage available on the datanode
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
storageUuid_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.NORMAL;
bitField0_ = (bitField0_ & ~0x00000002);
storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.storageUuid_ = storageUuid_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.state_ = state_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.storageType_ = storageType_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance()) return this;
if (other.hasStorageUuid()) {
bitField0_ |= 0x00000001;
storageUuid_ = other.storageUuid_;
onChanged();
}
if (other.hasState()) {
setState(other.getState());
}
if (other.hasStorageType()) {
setStorageType(other.getStorageType());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasStorageUuid()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string storageUuid = 1;
private java.lang.Object storageUuid_ = "";
/**
* required string storageUuid = 1;
*/
public boolean hasStorageUuid() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string storageUuid = 1;
*/
public java.lang.String getStorageUuid() {
java.lang.Object ref = storageUuid_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
storageUuid_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string storageUuid = 1;
*/
public com.google.protobuf.ByteString
getStorageUuidBytes() {
java.lang.Object ref = storageUuid_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
storageUuid_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string storageUuid = 1;
*/
public Builder setStorageUuid(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
storageUuid_ = value;
onChanged();
return this;
}
/**
* required string storageUuid = 1;
*/
public Builder clearStorageUuid() {
bitField0_ = (bitField0_ & ~0x00000001);
storageUuid_ = getDefaultInstance().getStorageUuid();
onChanged();
return this;
}
/**
* required string storageUuid = 1;
*/
public Builder setStorageUuidBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
storageUuid_ = value;
onChanged();
return this;
}
// optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.NORMAL;
/**
* optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];
*/
public boolean hasState() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState getState() {
return state_;
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];
*/
public Builder setState(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
state_ = value;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];
*/
public Builder clearState() {
bitField0_ = (bitField0_ & ~0x00000002);
state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.NORMAL;
onChanged();
return this;
}
// optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];
*/
public boolean hasStorageType() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
return storageType_;
}
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];
*/
public Builder setStorageType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
storageType_ = value;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];
*/
public Builder clearStorageType() {
bitField0_ = (bitField0_ & ~0x00000004);
storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeStorageProto)
}
static {
defaultInstance = new DatanodeStorageProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeStorageProto)
}
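// Usage sketch (editorial, not generated code): building and serializing a
// DatanodeStorageProto; the uuid below is a made-up placeholder.
//
//   DatanodeStorageProto storage = DatanodeStorageProto.newBuilder()
//       .setStorageUuid("DS-example-uuid")
//       .setStorageType(StorageTypeProto.SSD)
//       .build();                 // build() throws if storageUuid were unset
//   byte[] wire = storage.toByteArray();
//   DatanodeStorageProto copy = DatanodeStorageProto.parseFrom(wire);
//   assert storage.equals(copy) && storage.hashCode() == copy.hashCode();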
public interface StorageReportProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required string storageUuid = 1 [deprecated = true];
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated boolean hasStorageUuid();
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated java.lang.String getStorageUuid();
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated com.google.protobuf.ByteString
getStorageUuidBytes();
// optional bool failed = 2 [default = false];
/**
* optional bool failed = 2 [default = false];
*/
boolean hasFailed();
/**
* optional bool failed = 2 [default = false];
*/
boolean getFailed();
// optional uint64 capacity = 3 [default = 0];
/**
* optional uint64 capacity = 3 [default = 0];
*/
boolean hasCapacity();
/**
* optional uint64 capacity = 3 [default = 0];
*/
long getCapacity();
// optional uint64 dfsUsed = 4 [default = 0];
/**
* optional uint64 dfsUsed = 4 [default = 0];
*/
boolean hasDfsUsed();
/**
* optional uint64 dfsUsed = 4 [default = 0];
*/
long getDfsUsed();
// optional uint64 remaining = 5 [default = 0];
/**
* optional uint64 remaining = 5 [default = 0];
*/
boolean hasRemaining();
/**
* optional uint64 remaining = 5 [default = 0];
*/
long getRemaining();
// optional uint64 blockPoolUsed = 6 [default = 0];
/**
* optional uint64 blockPoolUsed = 6 [default = 0];
*/
boolean hasBlockPoolUsed();
/**
* optional uint64 blockPoolUsed = 6 [default = 0];
*/
long getBlockPoolUsed();
// optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
*
*
* supersedes StorageUuid
*
*/
boolean hasStorage();
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
*
*
* supersedes StorageUuid
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage();
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
*
*
* supersedes StorageUuid
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.StorageReportProto}
*/
public static final class StorageReportProto extends
com.google.protobuf.GeneratedMessage
implements StorageReportProtoOrBuilder {
// Use StorageReportProto.newBuilder() to construct.
private StorageReportProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private StorageReportProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final StorageReportProto defaultInstance;
public static StorageReportProto getDefaultInstance() {
return defaultInstance;
}
public StorageReportProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private StorageReportProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
storageUuid_ = input.readBytes();
break;
}
case 16: {
bitField0_ |= 0x00000002;
failed_ = input.readBool();
break;
}
case 24: {
bitField0_ |= 0x00000004;
capacity_ = input.readUInt64();
break;
}
case 32: {
bitField0_ |= 0x00000008;
dfsUsed_ = input.readUInt64();
break;
}
case 40: {
bitField0_ |= 0x00000010;
remaining_ = input.readUInt64();
break;
}
case 48: {
bitField0_ |= 0x00000020;
blockPoolUsed_ = input.readUInt64();
break;
}
case 58: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000040) == 0x00000040)) {
subBuilder = storage_.toBuilder();
}
storage_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(storage_);
storage_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000040;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
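// Editorial note (not generated code): the constructor above is the streaming
// parser. It loops on readTag() until tag 0 (end of input); each case label is
// a precomputed tag (fieldNumber << 3 | wireType): case 10 = field 1 (string),
// case 16 = field 2 (bool), cases 24..48 = the uint64 fields, case 58 = field 7
// (embedded message, merged into any previously seen value). Java switch cases
// are unordered, which is why the default arm can legally precede them.
//
//   (7 << 3) | 2 == 58   // wire type 2 = length-delimited nested message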
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder.class);
}
public static com.google.protobuf.Parser<StorageReportProto> PARSER =
new com.google.protobuf.AbstractParser<StorageReportProto>() {
public StorageReportProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new StorageReportProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<StorageReportProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required string storageUuid = 1 [deprecated = true];
public static final int STORAGEUUID_FIELD_NUMBER = 1;
private java.lang.Object storageUuid_;
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated public boolean hasStorageUuid() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated public java.lang.String getStorageUuid() {
java.lang.Object ref = storageUuid_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
storageUuid_ = s;
}
return s;
}
}
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated public com.google.protobuf.ByteString
getStorageUuidBytes() {
java.lang.Object ref = storageUuid_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
storageUuid_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// optional bool failed = 2 [default = false];
public static final int FAILED_FIELD_NUMBER = 2;
private boolean failed_;
/**
* optional bool failed = 2 [default = false];
*/
public boolean hasFailed() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional bool failed = 2 [default = false];
*/
public boolean getFailed() {
return failed_;
}
// optional uint64 capacity = 3 [default = 0];
public static final int CAPACITY_FIELD_NUMBER = 3;
private long capacity_;
/**
* optional uint64 capacity = 3 [default = 0];
*/
public boolean hasCapacity() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional uint64 capacity = 3 [default = 0];
*/
public long getCapacity() {
return capacity_;
}
// optional uint64 dfsUsed = 4 [default = 0];
public static final int DFSUSED_FIELD_NUMBER = 4;
private long dfsUsed_;
/**
* optional uint64 dfsUsed = 4 [default = 0];
*/
public boolean hasDfsUsed() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional uint64 dfsUsed = 4 [default = 0];
*/
public long getDfsUsed() {
return dfsUsed_;
}
// optional uint64 remaining = 5 [default = 0];
public static final int REMAINING_FIELD_NUMBER = 5;
private long remaining_;
/**
* optional uint64 remaining = 5 [default = 0];
*/
public boolean hasRemaining() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional uint64 remaining = 5 [default = 0];
*/
public long getRemaining() {
return remaining_;
}
// optional uint64 blockPoolUsed = 6 [default = 0];
public static final int BLOCKPOOLUSED_FIELD_NUMBER = 6;
private long blockPoolUsed_;
/**
* optional uint64 blockPoolUsed = 6 [default = 0];
*/
public boolean hasBlockPoolUsed() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional uint64 blockPoolUsed = 6 [default = 0];
*/
public long getBlockPoolUsed() {
return blockPoolUsed_;
}
// optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
public static final int STORAGE_FIELD_NUMBER = 7;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto storage_;
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
*
*
* supersedes StorageUuid
*
*/
public boolean hasStorage() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
*
*
* supersedes StorageUuid
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage() {
return storage_;
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
*
*
* supersedes StorageUuid
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder() {
return storage_;
}
private void initFields() {
storageUuid_ = "";
failed_ = false;
capacity_ = 0L;
dfsUsed_ = 0L;
remaining_ = 0L;
blockPoolUsed_ = 0L;
storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance();
}
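// Editorial note (not generated code): initFields() resets every field to its
// proto default ("" / false / 0L / the nested default instance) without setting
// any presence bit, so the has*() methods still return false. For instance:
//
//   StorageReportProto.getDefaultInstance().getCapacity();   // 0L
//   StorageReportProto.getDefaultInstance().hasCapacity();   // false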
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasStorageUuid()) {
memoizedIsInitialized = 0;
return false;
}
if (hasStorage()) {
if (!getStorage().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getStorageUuidBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBool(2, failed_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt64(3, capacity_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt64(4, dfsUsed_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeUInt64(5, remaining_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeUInt64(6, blockPoolUsed_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeMessage(7, storage_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getStorageUuidBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(2, failed_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, capacity_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(4, dfsUsed_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(5, remaining_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(6, blockPoolUsed_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(7, storage_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto) obj;
boolean result = true;
result = result && (hasStorageUuid() == other.hasStorageUuid());
if (hasStorageUuid()) {
result = result && getStorageUuid()
.equals(other.getStorageUuid());
}
result = result && (hasFailed() == other.hasFailed());
if (hasFailed()) {
result = result && (getFailed()
== other.getFailed());
}
result = result && (hasCapacity() == other.hasCapacity());
if (hasCapacity()) {
result = result && (getCapacity()
== other.getCapacity());
}
result = result && (hasDfsUsed() == other.hasDfsUsed());
if (hasDfsUsed()) {
result = result && (getDfsUsed()
== other.getDfsUsed());
}
result = result && (hasRemaining() == other.hasRemaining());
if (hasRemaining()) {
result = result && (getRemaining()
== other.getRemaining());
}
result = result && (hasBlockPoolUsed() == other.hasBlockPoolUsed());
if (hasBlockPoolUsed()) {
result = result && (getBlockPoolUsed()
== other.getBlockPoolUsed());
}
result = result && (hasStorage() == other.hasStorage());
if (hasStorage()) {
result = result && getStorage()
.equals(other.getStorage());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasStorageUuid()) {
hash = (37 * hash) + STORAGEUUID_FIELD_NUMBER;
hash = (53 * hash) + getStorageUuid().hashCode();
}
if (hasFailed()) {
hash = (37 * hash) + FAILED_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getFailed());
}
if (hasCapacity()) {
hash = (37 * hash) + CAPACITY_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getCapacity());
}
if (hasDfsUsed()) {
hash = (37 * hash) + DFSUSED_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getDfsUsed());
}
if (hasRemaining()) {
hash = (37 * hash) + REMAINING_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getRemaining());
}
if (hasBlockPoolUsed()) {
hash = (37 * hash) + BLOCKPOOLUSED_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getBlockPoolUsed());
}
if (hasStorage()) {
hash = (37 * hash) + STORAGE_FIELD_NUMBER;
hash = (53 * hash) + getStorage().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.StorageReportProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getStorageFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
storageUuid_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
failed_ = false;
bitField0_ = (bitField0_ & ~0x00000002);
capacity_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
dfsUsed_ = 0L;
bitField0_ = (bitField0_ & ~0x00000008);
remaining_ = 0L;
bitField0_ = (bitField0_ & ~0x00000010);
blockPoolUsed_ = 0L;
bitField0_ = (bitField0_ & ~0x00000020);
if (storageBuilder_ == null) {
storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance();
} else {
storageBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000040);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.storageUuid_ = storageUuid_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.failed_ = failed_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.capacity_ = capacity_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.dfsUsed_ = dfsUsed_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.remaining_ = remaining_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000020;
}
result.blockPoolUsed_ = blockPoolUsed_;
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000040;
}
if (storageBuilder_ == null) {
result.storage_ = storage_;
} else {
result.storage_ = storageBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.getDefaultInstance()) return this;
if (other.hasStorageUuid()) {
bitField0_ |= 0x00000001;
storageUuid_ = other.storageUuid_;
onChanged();
}
if (other.hasFailed()) {
setFailed(other.getFailed());
}
if (other.hasCapacity()) {
setCapacity(other.getCapacity());
}
if (other.hasDfsUsed()) {
setDfsUsed(other.getDfsUsed());
}
if (other.hasRemaining()) {
setRemaining(other.getRemaining());
}
if (other.hasBlockPoolUsed()) {
setBlockPoolUsed(other.getBlockPoolUsed());
}
if (other.hasStorage()) {
mergeStorage(other.getStorage());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasStorageUuid()) {
return false;
}
if (hasStorage()) {
if (!getStorage().isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
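// Editorial note (not generated code): this mergeFrom() preserves partial
// progress on failure. The catch arm recovers the half-parsed message via
// e.getUnfinishedMessage() and rethrows; the finally arm then merges whatever
// was recovered into the builder, so a caller can still inspect the prefix
// that did decode:
//
//   try { builder.mergeFrom(input, registry); }
//   catch (com.google.protobuf.InvalidProtocolBufferException e) {
//     // builder already holds the successfully parsed fields
//   }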
private int bitField0_;
// required string storageUuid = 1 [deprecated = true];
private java.lang.Object storageUuid_ = "";
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated public boolean hasStorageUuid() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated public java.lang.String getStorageUuid() {
java.lang.Object ref = storageUuid_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
storageUuid_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated public com.google.protobuf.ByteString
getStorageUuidBytes() {
java.lang.Object ref = storageUuid_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
storageUuid_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated public Builder setStorageUuid(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
storageUuid_ = value;
onChanged();
return this;
}
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated public Builder clearStorageUuid() {
bitField0_ = (bitField0_ & ~0x00000001);
storageUuid_ = getDefaultInstance().getStorageUuid();
onChanged();
return this;
}
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated public Builder setStorageUuidBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
storageUuid_ = value;
onChanged();
return this;
}
// optional bool failed = 2 [default = false];
private boolean failed_ ;
/**
* optional bool failed = 2 [default = false];
*/
public boolean hasFailed() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional bool failed = 2 [default = false];
*/
public boolean getFailed() {
return failed_;
}
/**
* optional bool failed = 2 [default = false];
*/
public Builder setFailed(boolean value) {
bitField0_ |= 0x00000002;
failed_ = value;
onChanged();
return this;
}
/**
* optional bool failed = 2 [default = false];
*/
public Builder clearFailed() {
bitField0_ = (bitField0_ & ~0x00000002);
failed_ = false;
onChanged();
return this;
}
// optional uint64 capacity = 3 [default = 0];
private long capacity_ ;
/**
* optional uint64 capacity = 3 [default = 0];
*/
public boolean hasCapacity() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional uint64 capacity = 3 [default = 0];
*/
public long getCapacity() {
return capacity_;
}
/**
* optional uint64 capacity = 3 [default = 0];
*/
public Builder setCapacity(long value) {
bitField0_ |= 0x00000004;
capacity_ = value;
onChanged();
return this;
}
/**
* optional uint64 capacity = 3 [default = 0];
*/
public Builder clearCapacity() {
bitField0_ = (bitField0_ & ~0x00000004);
capacity_ = 0L;
onChanged();
return this;
}
// optional uint64 dfsUsed = 4 [default = 0];
private long dfsUsed_ ;
/**
* optional uint64 dfsUsed = 4 [default = 0];
*/
public boolean hasDfsUsed() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional uint64 dfsUsed = 4 [default = 0];
*/
public long getDfsUsed() {
return dfsUsed_;
}
/**
* optional uint64 dfsUsed = 4 [default = 0];
*/
public Builder setDfsUsed(long value) {
bitField0_ |= 0x00000008;
dfsUsed_ = value;
onChanged();
return this;
}
/**
* optional uint64 dfsUsed = 4 [default = 0];
*/
public Builder clearDfsUsed() {
bitField0_ = (bitField0_ & ~0x00000008);
dfsUsed_ = 0L;
onChanged();
return this;
}
// optional uint64 remaining = 5 [default = 0];
private long remaining_ ;
/**
* optional uint64 remaining = 5 [default = 0];
*/
public boolean hasRemaining() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional uint64 remaining = 5 [default = 0];
*/
public long getRemaining() {
return remaining_;
}
/**
* optional uint64 remaining = 5 [default = 0];
*/
public Builder setRemaining(long value) {
bitField0_ |= 0x00000010;
remaining_ = value;
onChanged();
return this;
}
/**
* optional uint64 remaining = 5 [default = 0];
*/
public Builder clearRemaining() {
bitField0_ = (bitField0_ & ~0x00000010);
remaining_ = 0L;
onChanged();
return this;
}
// optional uint64 blockPoolUsed = 6 [default = 0];
private long blockPoolUsed_ ;
/**
* optional uint64 blockPoolUsed = 6 [default = 0];
*/
public boolean hasBlockPoolUsed() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional uint64 blockPoolUsed = 6 [default = 0];
*/
public long getBlockPoolUsed() {
return blockPoolUsed_;
}
/**
* optional uint64 blockPoolUsed = 6 [default = 0];
*/
public Builder setBlockPoolUsed(long value) {
bitField0_ |= 0x00000020;
blockPoolUsed_ = value;
onChanged();
return this;
}
/**
* optional uint64 blockPoolUsed = 6 [default = 0];
*/
public Builder clearBlockPoolUsed() {
bitField0_ = (bitField0_ & ~0x00000020);
blockPoolUsed_ = 0L;
onChanged();
return this;
}
// optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder> storageBuilder_;
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
*
*
* supersedes StorageUuid
*
*/
public boolean hasStorage() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
*
*
* supersedes StorageUuid
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage() {
if (storageBuilder_ == null) {
return storage_;
} else {
return storageBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
*
*
* supersedes StorageUuid
*
*/
public Builder setStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto value) {
if (storageBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
storage_ = value;
onChanged();
} else {
storageBuilder_.setMessage(value);
}
bitField0_ |= 0x00000040;
return this;
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
*
*
* supersedes StorageUuid
*
*/
public Builder setStorage(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder builderForValue) {
if (storageBuilder_ == null) {
storage_ = builderForValue.build();
onChanged();
} else {
storageBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000040;
return this;
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
*
*
* supersedes StorageUuid
*
*/
public Builder mergeStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto value) {
if (storageBuilder_ == null) {
if (((bitField0_ & 0x00000040) == 0x00000040) &&
storage_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance()) {
storage_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.newBuilder(storage_).mergeFrom(value).buildPartial();
} else {
storage_ = value;
}
onChanged();
} else {
storageBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000040;
return this;
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
*
*
* supersedes StorageUuid
*
*/
public Builder clearStorage() {
if (storageBuilder_ == null) {
storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance();
onChanged();
} else {
storageBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000040);
return this;
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
*
*
* supersedes StorageUuid
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder getStorageBuilder() {
bitField0_ |= 0x00000040;
onChanged();
return getStorageFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
*
*
* supersedes StorageUuid
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder() {
if (storageBuilder_ != null) {
return storageBuilder_.getMessageOrBuilder();
} else {
return storage_;
}
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
*
*
* supersedes StorageUuid
*
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder>
getStorageFieldBuilder() {
if (storageBuilder_ == null) {
storageBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder>(
storage_,
getParentForChildren(),
isClean());
storage_ = null;
}
return storageBuilder_;
}
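// Editorial note (not generated code): storageBuilder_ is created lazily. Until
// getStorageFieldBuilder() runs, the plain storage_ message backs the field;
// afterwards storage_ is nulled and all access goes through the
// SingleFieldBuilder, which propagates nested changes to the parent builder.
// A hypothetical in-place edit of the nested message:
//
//   reportBuilder.getStorageBuilder()          // forces the field builder
//       .setStorageType(StorageTypeProto.ARCHIVE);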
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageReportProto)
}
static {
defaultInstance = new StorageReportProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageReportProto)
}
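// Usage sketch (editorial, not generated code): a report carrying the nested
// storage message; all numbers and the uuid are illustrative only.
//
//   StorageReportProto report = StorageReportProto.newBuilder()
//       .setStorageUuid("DS-example-uuid")   // deprecated but still required
//       .setCapacity(1024L * 1024L * 1024L)
//       .setDfsUsed(512L * 1024L * 1024L)
//       .setStorage(DatanodeStorageProto.newBuilder()
//           .setStorageUuid("DS-example-uuid"))
//       .build();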
public interface ContentSummaryProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required uint64 length = 1;
/**
* required uint64 length = 1;
*/
boolean hasLength();
/**
* required uint64 length = 1;
*/
long getLength();
// required uint64 fileCount = 2;
/**
* required uint64 fileCount = 2;
*/
boolean hasFileCount();
/**
* required uint64 fileCount = 2;
*/
long getFileCount();
// required uint64 directoryCount = 3;
/**
* required uint64 directoryCount = 3;
*/
boolean hasDirectoryCount();
/**
* required uint64 directoryCount = 3;
*/
long getDirectoryCount();
// required uint64 quota = 4;
/**
* required uint64 quota = 4;
*/
boolean hasQuota();
/**
* required uint64 quota = 4;
*/
long getQuota();
// required uint64 spaceConsumed = 5;
/**
* required uint64 spaceConsumed = 5;
*/
boolean hasSpaceConsumed();
/**
* required uint64 spaceConsumed = 5;
*/
long getSpaceConsumed();
// required uint64 spaceQuota = 6;
/**
* required uint64 spaceQuota = 6;
*/
boolean hasSpaceQuota();
/**
* required uint64 spaceQuota = 6;
*/
long getSpaceQuota();
// optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
boolean hasTypeQuotaInfos();
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos();
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.ContentSummaryProto}
*
* <pre>
* Summary of a file or directory
* </pre>
*/
public static final class ContentSummaryProto extends
com.google.protobuf.GeneratedMessage
implements ContentSummaryProtoOrBuilder {
// Use ContentSummaryProto.newBuilder() to construct.
private ContentSummaryProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ContentSummaryProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ContentSummaryProto defaultInstance;
public static ContentSummaryProto getDefaultInstance() {
return defaultInstance;
}
public ContentSummaryProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ContentSummaryProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
length_ = input.readUInt64();
break;
}
case 16: {
bitField0_ |= 0x00000002;
fileCount_ = input.readUInt64();
break;
}
case 24: {
bitField0_ |= 0x00000004;
directoryCount_ = input.readUInt64();
break;
}
case 32: {
bitField0_ |= 0x00000008;
quota_ = input.readUInt64();
break;
}
case 40: {
bitField0_ |= 0x00000010;
spaceConsumed_ = input.readUInt64();
break;
}
case 48: {
bitField0_ |= 0x00000020;
spaceQuota_ = input.readUInt64();
break;
}
case 58: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000040) == 0x00000040)) {
subBuilder = typeQuotaInfos_.toBuilder();
}
typeQuotaInfos_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(typeQuotaInfos_);
typeQuotaInfos_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000040;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.Builder.class);
}
public static com.google.protobuf.Parser<ContentSummaryProto> PARSER =
new com.google.protobuf.AbstractParser<ContentSummaryProto>() {
public ContentSummaryProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ContentSummaryProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<ContentSummaryProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required uint64 length = 1;
public static final int LENGTH_FIELD_NUMBER = 1;
private long length_;
/**
* required uint64 length = 1;
*/
public boolean hasLength() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint64 length = 1;
*/
public long getLength() {
return length_;
}
// required uint64 fileCount = 2;
public static final int FILECOUNT_FIELD_NUMBER = 2;
private long fileCount_;
/**
* required uint64 fileCount = 2;
*/
public boolean hasFileCount() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 fileCount = 2;
*/
public long getFileCount() {
return fileCount_;
}
// required uint64 directoryCount = 3;
public static final int DIRECTORYCOUNT_FIELD_NUMBER = 3;
private long directoryCount_;
/**
* required uint64 directoryCount = 3;
*/
public boolean hasDirectoryCount() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 directoryCount = 3;
*/
public long getDirectoryCount() {
return directoryCount_;
}
// required uint64 quota = 4;
public static final int QUOTA_FIELD_NUMBER = 4;
private long quota_;
/**
* required uint64 quota = 4;
*/
public boolean hasQuota() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required uint64 quota = 4;
*/
public long getQuota() {
return quota_;
}
// required uint64 spaceConsumed = 5;
public static final int SPACECONSUMED_FIELD_NUMBER = 5;
private long spaceConsumed_;
/**
* required uint64 spaceConsumed = 5;
*/
public boolean hasSpaceConsumed() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required uint64 spaceConsumed = 5;
*/
public long getSpaceConsumed() {
return spaceConsumed_;
}
// required uint64 spaceQuota = 6;
public static final int SPACEQUOTA_FIELD_NUMBER = 6;
private long spaceQuota_;
/**
* required uint64 spaceQuota = 6;
*/
public boolean hasSpaceQuota() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* required uint64 spaceQuota = 6;
*/
public long getSpaceQuota() {
return spaceQuota_;
}
// optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
public static final int TYPEQUOTAINFOS_FIELD_NUMBER = 7;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto typeQuotaInfos_;
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
public boolean hasTypeQuotaInfos() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos() {
return typeQuotaInfos_;
}
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder() {
return typeQuotaInfos_;
}
private void initFields() {
length_ = 0L;
fileCount_ = 0L;
directoryCount_ = 0L;
quota_ = 0L;
spaceConsumed_ = 0L;
spaceQuota_ = 0L;
typeQuotaInfos_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasLength()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasFileCount()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasDirectoryCount()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasQuota()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasSpaceConsumed()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasSpaceQuota()) {
memoizedIsInitialized = 0;
return false;
}
if (hasTypeQuotaInfos()) {
if (!getTypeQuotaInfos().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
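// Editorial note (not generated code): unlike the previous two messages, all
// six numeric fields here are proto2 `required`, so build() fails unless each
// one is set; only typeQuotaInfos is optional. A minimal valid instance
// (builder setters follow the generated naming convention):
//
//   ContentSummaryProto.newBuilder()
//       .setLength(0L).setFileCount(0L).setDirectoryCount(0L)
//       .setQuota(0L).setSpaceConsumed(0L).setSpaceQuota(0L)
//       .build();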
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt64(1, length_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, fileCount_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt64(3, directoryCount_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt64(4, quota_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeUInt64(5, spaceConsumed_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeUInt64(6, spaceQuota_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeMessage(7, typeQuotaInfos_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(1, length_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, fileCount_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, directoryCount_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(4, quota_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(5, spaceConsumed_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(6, spaceQuota_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(7, typeQuotaInfos_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto) obj;
boolean result = true;
result = result && (hasLength() == other.hasLength());
if (hasLength()) {
result = result && (getLength()
== other.getLength());
}
result = result && (hasFileCount() == other.hasFileCount());
if (hasFileCount()) {
result = result && (getFileCount()
== other.getFileCount());
}
result = result && (hasDirectoryCount() == other.hasDirectoryCount());
if (hasDirectoryCount()) {
result = result && (getDirectoryCount()
== other.getDirectoryCount());
}
result = result && (hasQuota() == other.hasQuota());
if (hasQuota()) {
result = result && (getQuota()
== other.getQuota());
}
result = result && (hasSpaceConsumed() == other.hasSpaceConsumed());
if (hasSpaceConsumed()) {
result = result && (getSpaceConsumed()
== other.getSpaceConsumed());
}
result = result && (hasSpaceQuota() == other.hasSpaceQuota());
if (hasSpaceQuota()) {
result = result && (getSpaceQuota()
== other.getSpaceQuota());
}
result = result && (hasTypeQuotaInfos() == other.hasTypeQuotaInfos());
if (hasTypeQuotaInfos()) {
result = result && getTypeQuotaInfos()
.equals(other.getTypeQuotaInfos());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasLength()) {
hash = (37 * hash) + LENGTH_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getLength());
}
if (hasFileCount()) {
hash = (37 * hash) + FILECOUNT_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getFileCount());
}
if (hasDirectoryCount()) {
hash = (37 * hash) + DIRECTORYCOUNT_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getDirectoryCount());
}
if (hasQuota()) {
hash = (37 * hash) + QUOTA_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getQuota());
}
if (hasSpaceConsumed()) {
hash = (37 * hash) + SPACECONSUMED_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getSpaceConsumed());
}
if (hasSpaceQuota()) {
hash = (37 * hash) + SPACEQUOTA_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getSpaceQuota());
}
if (hasTypeQuotaInfos()) {
hash = (37 * hash) + TYPEQUOTAINFOS_FIELD_NUMBER;
hash = (53 * hash) + getTypeQuotaInfos().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ContentSummaryProto}
*
* <pre>
**
* Summary of a file or directory
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getTypeQuotaInfosFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
length_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
fileCount_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
directoryCount_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
quota_ = 0L;
bitField0_ = (bitField0_ & ~0x00000008);
spaceConsumed_ = 0L;
bitField0_ = (bitField0_ & ~0x00000010);
spaceQuota_ = 0L;
bitField0_ = (bitField0_ & ~0x00000020);
if (typeQuotaInfosBuilder_ == null) {
typeQuotaInfos_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance();
} else {
typeQuotaInfosBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000040);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.length_ = length_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.fileCount_ = fileCount_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.directoryCount_ = directoryCount_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.quota_ = quota_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.spaceConsumed_ = spaceConsumed_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000020;
}
result.spaceQuota_ = spaceQuota_;
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000040;
}
if (typeQuotaInfosBuilder_ == null) {
result.typeQuotaInfos_ = typeQuotaInfos_;
} else {
result.typeQuotaInfos_ = typeQuotaInfosBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDefaultInstance()) return this;
if (other.hasLength()) {
setLength(other.getLength());
}
if (other.hasFileCount()) {
setFileCount(other.getFileCount());
}
if (other.hasDirectoryCount()) {
setDirectoryCount(other.getDirectoryCount());
}
if (other.hasQuota()) {
setQuota(other.getQuota());
}
if (other.hasSpaceConsumed()) {
setSpaceConsumed(other.getSpaceConsumed());
}
if (other.hasSpaceQuota()) {
setSpaceQuota(other.getSpaceQuota());
}
if (other.hasTypeQuotaInfos()) {
mergeTypeQuotaInfos(other.getTypeQuotaInfos());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasLength()) {
return false;
}
if (!hasFileCount()) {
return false;
}
if (!hasDirectoryCount()) {
return false;
}
if (!hasQuota()) {
return false;
}
if (!hasSpaceConsumed()) {
return false;
}
if (!hasSpaceQuota()) {
return false;
}
if (hasTypeQuotaInfos()) {
if (!getTypeQuotaInfos().isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
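// Explanatory note (added commentary, not protoc output): the finally block above
// merges whatever was parsed before an InvalidProtocolBufferException was thrown,
// so a failed mergeFrom(...) can still leave partial field values in this builder.
// A caller that needs all-or-nothing semantics can stage the parse in a scratch
// builder first (illustrative sketch; "in" and "target" are assumed to exist):
//
//   com.google.protobuf.CodedInputStream in = ...; // some wire input
//   HdfsProtos.ContentSummaryProto.Builder scratch =
//       HdfsProtos.ContentSummaryProto.newBuilder();
//   scratch.mergeFrom(in,
//       com.google.protobuf.ExtensionRegistryLite.getEmptyRegistry());
//   target.mergeFrom(scratch.buildPartial()); // reached only if the parse succeeded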
private int bitField0_;
// required uint64 length = 1;
private long length_ ;
/**
* required uint64 length = 1;
*/
public boolean hasLength() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint64 length = 1;
*/
public long getLength() {
return length_;
}
/**
* required uint64 length = 1;
*/
public Builder setLength(long value) {
bitField0_ |= 0x00000001;
length_ = value;
onChanged();
return this;
}
/**
* required uint64 length = 1;
*/
public Builder clearLength() {
bitField0_ = (bitField0_ & ~0x00000001);
length_ = 0L;
onChanged();
return this;
}
// required uint64 fileCount = 2;
private long fileCount_ ;
/**
* required uint64 fileCount = 2;
*/
public boolean hasFileCount() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 fileCount = 2;
*/
public long getFileCount() {
return fileCount_;
}
/**
* required uint64 fileCount = 2;
*/
public Builder setFileCount(long value) {
bitField0_ |= 0x00000002;
fileCount_ = value;
onChanged();
return this;
}
/**
* required uint64 fileCount = 2;
*/
public Builder clearFileCount() {
bitField0_ = (bitField0_ & ~0x00000002);
fileCount_ = 0L;
onChanged();
return this;
}
// required uint64 directoryCount = 3;
private long directoryCount_ ;
/**
* required uint64 directoryCount = 3;
*/
public boolean hasDirectoryCount() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 directoryCount = 3;
*/
public long getDirectoryCount() {
return directoryCount_;
}
/**
* required uint64 directoryCount = 3;
*/
public Builder setDirectoryCount(long value) {
bitField0_ |= 0x00000004;
directoryCount_ = value;
onChanged();
return this;
}
/**
* required uint64 directoryCount = 3;
*/
public Builder clearDirectoryCount() {
bitField0_ = (bitField0_ & ~0x00000004);
directoryCount_ = 0L;
onChanged();
return this;
}
// required uint64 quota = 4;
private long quota_ ;
/**
* required uint64 quota = 4;
*/
public boolean hasQuota() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required uint64 quota = 4;
*/
public long getQuota() {
return quota_;
}
/**
* required uint64 quota = 4;
*/
public Builder setQuota(long value) {
bitField0_ |= 0x00000008;
quota_ = value;
onChanged();
return this;
}
/**
* required uint64 quota = 4;
*/
public Builder clearQuota() {
bitField0_ = (bitField0_ & ~0x00000008);
quota_ = 0L;
onChanged();
return this;
}
// required uint64 spaceConsumed = 5;
private long spaceConsumed_ ;
/**
* required uint64 spaceConsumed = 5;
*/
public boolean hasSpaceConsumed() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required uint64 spaceConsumed = 5;
*/
public long getSpaceConsumed() {
return spaceConsumed_;
}
/**
* required uint64 spaceConsumed = 5;
*/
public Builder setSpaceConsumed(long value) {
bitField0_ |= 0x00000010;
spaceConsumed_ = value;
onChanged();
return this;
}
/**
* required uint64 spaceConsumed = 5;
*/
public Builder clearSpaceConsumed() {
bitField0_ = (bitField0_ & ~0x00000010);
spaceConsumed_ = 0L;
onChanged();
return this;
}
// required uint64 spaceQuota = 6;
private long spaceQuota_ ;
/**
* required uint64 spaceQuota = 6;
*/
public boolean hasSpaceQuota() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* required uint64 spaceQuota = 6;
*/
public long getSpaceQuota() {
return spaceQuota_;
}
/**
* required uint64 spaceQuota = 6;
*/
public Builder setSpaceQuota(long value) {
bitField0_ |= 0x00000020;
spaceQuota_ = value;
onChanged();
return this;
}
/**
* required uint64 spaceQuota = 6;
*/
public Builder clearSpaceQuota() {
bitField0_ = (bitField0_ & ~0x00000020);
spaceQuota_ = 0L;
onChanged();
return this;
}
// optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto typeQuotaInfos_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder> typeQuotaInfosBuilder_;
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
public boolean hasTypeQuotaInfos() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos() {
if (typeQuotaInfosBuilder_ == null) {
return typeQuotaInfos_;
} else {
return typeQuotaInfosBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
public Builder setTypeQuotaInfos(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto value) {
if (typeQuotaInfosBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
typeQuotaInfos_ = value;
onChanged();
} else {
typeQuotaInfosBuilder_.setMessage(value);
}
bitField0_ |= 0x00000040;
return this;
}
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
public Builder setTypeQuotaInfos(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder builderForValue) {
if (typeQuotaInfosBuilder_ == null) {
typeQuotaInfos_ = builderForValue.build();
onChanged();
} else {
typeQuotaInfosBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000040;
return this;
}
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
public Builder mergeTypeQuotaInfos(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto value) {
if (typeQuotaInfosBuilder_ == null) {
if (((bitField0_ & 0x00000040) == 0x00000040) &&
typeQuotaInfos_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance()) {
typeQuotaInfos_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.newBuilder(typeQuotaInfos_).mergeFrom(value).buildPartial();
} else {
typeQuotaInfos_ = value;
}
onChanged();
} else {
typeQuotaInfosBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000040;
return this;
}
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
public Builder clearTypeQuotaInfos() {
if (typeQuotaInfosBuilder_ == null) {
typeQuotaInfos_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance();
onChanged();
} else {
typeQuotaInfosBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000040);
return this;
}
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder getTypeQuotaInfosBuilder() {
bitField0_ |= 0x00000040;
onChanged();
return getTypeQuotaInfosFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder() {
if (typeQuotaInfosBuilder_ != null) {
return typeQuotaInfosBuilder_.getMessageOrBuilder();
} else {
return typeQuotaInfos_;
}
}
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder>
getTypeQuotaInfosFieldBuilder() {
if (typeQuotaInfosBuilder_ == null) {
typeQuotaInfosBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder>(
typeQuotaInfos_,
getParentForChildren(),
isClean());
typeQuotaInfos_ = null;
}
return typeQuotaInfosBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ContentSummaryProto)
}
static {
defaultInstance = new ContentSummaryProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ContentSummaryProto)
}
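// Usage sketch (illustrative, not part of the generated file): building a
// ContentSummaryProto and round-tripping it through its wire form. All six
// required fields must be set, or build() throws an uninitialized-message
// exception via newUninitializedMessageException(). The -1L quota values are
// an assumption borrowed from HDFS's "no quota" convention; on the wire they
// are reinterpreted as max uint64.
//
//   HdfsProtos.ContentSummaryProto summary =
//       HdfsProtos.ContentSummaryProto.newBuilder()
//           .setLength(1024L)
//           .setFileCount(3L)
//           .setDirectoryCount(1L)
//           .setQuota(-1L)
//           .setSpaceConsumed(3072L)
//           .setSpaceQuota(-1L)
//           .build();
//   byte[] wire = summary.toByteArray();
//   HdfsProtos.ContentSummaryProto parsed =
//       HdfsProtos.ContentSummaryProto.parseFrom(wire);
//   assert parsed.equals(summary);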
public interface StorageTypeQuotaInfosProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto>
getTypeQuotaInfoList();
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getTypeQuotaInfo(int index);
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
int getTypeQuotaInfoCount();
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder>
getTypeQuotaInfoOrBuilderList();
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder getTypeQuotaInfoOrBuilder(
int index);
}
/**
* Protobuf type {@code hadoop.hdfs.StorageTypeQuotaInfosProto}
*
* <pre>
**
* Storage type quota and usage information of a file or directory
* </pre>
*/
public static final class StorageTypeQuotaInfosProto extends
com.google.protobuf.GeneratedMessage
implements StorageTypeQuotaInfosProtoOrBuilder {
// Use StorageTypeQuotaInfosProto.newBuilder() to construct.
private StorageTypeQuotaInfosProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private StorageTypeQuotaInfosProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final StorageTypeQuotaInfosProto defaultInstance;
public static StorageTypeQuotaInfosProto getDefaultInstance() {
return defaultInstance;
}
public StorageTypeQuotaInfosProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private StorageTypeQuotaInfosProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
typeQuotaInfo_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto>();
mutable_bitField0_ |= 0x00000001;
}
typeQuotaInfo_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.PARSER, extensionRegistry));
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
typeQuotaInfo_ = java.util.Collections.unmodifiableList(typeQuotaInfo_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
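// Wire-format note (added commentary, not generated output): the case labels in
// the switch above are raw protobuf tags, computed as
// (field_number << 3) | wire_type. The repeated typeQuotaInfo field is field 1
// with wire type 2 (length-delimited), so it parses under
// case 10 == (1 << 3) | 2, and case 0 marks end of input.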
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder.class);
}
public static com.google.protobuf.Parser<StorageTypeQuotaInfosProto> PARSER =
new com.google.protobuf.AbstractParser<StorageTypeQuotaInfosProto>() {
public StorageTypeQuotaInfosProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new StorageTypeQuotaInfosProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<StorageTypeQuotaInfosProto> getParserForType() {
return PARSER;
}
// repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
public static final int TYPEQUOTAINFO_FIELD_NUMBER = 1;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto> typeQuotaInfo_;
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto> getTypeQuotaInfoList() {
return typeQuotaInfo_;
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder>
getTypeQuotaInfoOrBuilderList() {
return typeQuotaInfo_;
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public int getTypeQuotaInfoCount() {
return typeQuotaInfo_.size();
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getTypeQuotaInfo(int index) {
return typeQuotaInfo_.get(index);
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder getTypeQuotaInfoOrBuilder(
int index) {
return typeQuotaInfo_.get(index);
}
private void initFields() {
typeQuotaInfo_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
for (int i = 0; i < getTypeQuotaInfoCount(); i++) {
if (!getTypeQuotaInfo(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < typeQuotaInfo_.size(); i++) {
output.writeMessage(1, typeQuotaInfo_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
for (int i = 0; i < typeQuotaInfo_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, typeQuotaInfo_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto) obj;
boolean result = true;
result = result && getTypeQuotaInfoList()
.equals(other.getTypeQuotaInfoList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (getTypeQuotaInfoCount() > 0) {
hash = (37 * hash) + TYPEQUOTAINFO_FIELD_NUMBER;
hash = (53 * hash) + getTypeQuotaInfoList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.StorageTypeQuotaInfosProto}
*
* <pre>
**
* Storage type quota and usage information of a file or directory
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getTypeQuotaInfoFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (typeQuotaInfoBuilder_ == null) {
typeQuotaInfo_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
} else {
typeQuotaInfoBuilder_.clear();
}
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto(this);
int from_bitField0_ = bitField0_;
if (typeQuotaInfoBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001)) {
typeQuotaInfo_ = java.util.Collections.unmodifiableList(typeQuotaInfo_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.typeQuotaInfo_ = typeQuotaInfo_;
} else {
result.typeQuotaInfo_ = typeQuotaInfoBuilder_.build();
}
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance()) return this;
if (typeQuotaInfoBuilder_ == null) {
if (!other.typeQuotaInfo_.isEmpty()) {
if (typeQuotaInfo_.isEmpty()) {
typeQuotaInfo_ = other.typeQuotaInfo_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureTypeQuotaInfoIsMutable();
typeQuotaInfo_.addAll(other.typeQuotaInfo_);
}
onChanged();
}
} else {
if (!other.typeQuotaInfo_.isEmpty()) {
if (typeQuotaInfoBuilder_.isEmpty()) {
typeQuotaInfoBuilder_.dispose();
typeQuotaInfoBuilder_ = null;
typeQuotaInfo_ = other.typeQuotaInfo_;
bitField0_ = (bitField0_ & ~0x00000001);
typeQuotaInfoBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getTypeQuotaInfoFieldBuilder() : null;
} else {
typeQuotaInfoBuilder_.addAllMessages(other.typeQuotaInfo_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
for (int i = 0; i < getTypeQuotaInfoCount(); i++) {
if (!getTypeQuotaInfo(i).isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto> typeQuotaInfo_ =
java.util.Collections.emptyList();
private void ensureTypeQuotaInfoIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
typeQuotaInfo_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto>(typeQuotaInfo_);
bitField0_ |= 0x00000001;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder> typeQuotaInfoBuilder_;
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto> getTypeQuotaInfoList() {
if (typeQuotaInfoBuilder_ == null) {
return java.util.Collections.unmodifiableList(typeQuotaInfo_);
} else {
return typeQuotaInfoBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public int getTypeQuotaInfoCount() {
if (typeQuotaInfoBuilder_ == null) {
return typeQuotaInfo_.size();
} else {
return typeQuotaInfoBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getTypeQuotaInfo(int index) {
if (typeQuotaInfoBuilder_ == null) {
return typeQuotaInfo_.get(index);
} else {
return typeQuotaInfoBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public Builder setTypeQuotaInfo(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto value) {
if (typeQuotaInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTypeQuotaInfoIsMutable();
typeQuotaInfo_.set(index, value);
onChanged();
} else {
typeQuotaInfoBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public Builder setTypeQuotaInfo(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder builderForValue) {
if (typeQuotaInfoBuilder_ == null) {
ensureTypeQuotaInfoIsMutable();
typeQuotaInfo_.set(index, builderForValue.build());
onChanged();
} else {
typeQuotaInfoBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public Builder addTypeQuotaInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto value) {
if (typeQuotaInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTypeQuotaInfoIsMutable();
typeQuotaInfo_.add(value);
onChanged();
} else {
typeQuotaInfoBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public Builder addTypeQuotaInfo(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto value) {
if (typeQuotaInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTypeQuotaInfoIsMutable();
typeQuotaInfo_.add(index, value);
onChanged();
} else {
typeQuotaInfoBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public Builder addTypeQuotaInfo(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder builderForValue) {
if (typeQuotaInfoBuilder_ == null) {
ensureTypeQuotaInfoIsMutable();
typeQuotaInfo_.add(builderForValue.build());
onChanged();
} else {
typeQuotaInfoBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public Builder addTypeQuotaInfo(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder builderForValue) {
if (typeQuotaInfoBuilder_ == null) {
ensureTypeQuotaInfoIsMutable();
typeQuotaInfo_.add(index, builderForValue.build());
onChanged();
} else {
typeQuotaInfoBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public Builder addAllTypeQuotaInfo(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto> values) {
if (typeQuotaInfoBuilder_ == null) {
ensureTypeQuotaInfoIsMutable();
super.addAll(values, typeQuotaInfo_);
onChanged();
} else {
typeQuotaInfoBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public Builder clearTypeQuotaInfo() {
if (typeQuotaInfoBuilder_ == null) {
typeQuotaInfo_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
} else {
typeQuotaInfoBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public Builder removeTypeQuotaInfo(int index) {
if (typeQuotaInfoBuilder_ == null) {
ensureTypeQuotaInfoIsMutable();
typeQuotaInfo_.remove(index);
onChanged();
} else {
typeQuotaInfoBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder getTypeQuotaInfoBuilder(
int index) {
return getTypeQuotaInfoFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder getTypeQuotaInfoOrBuilder(
int index) {
if (typeQuotaInfoBuilder_ == null) {
return typeQuotaInfo_.get(index); } else {
return typeQuotaInfoBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder>
getTypeQuotaInfoOrBuilderList() {
if (typeQuotaInfoBuilder_ != null) {
return typeQuotaInfoBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(typeQuotaInfo_);
}
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder addTypeQuotaInfoBuilder() {
return getTypeQuotaInfoFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder addTypeQuotaInfoBuilder(
int index) {
return getTypeQuotaInfoFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder>
getTypeQuotaInfoBuilderList() {
return getTypeQuotaInfoFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder>
getTypeQuotaInfoFieldBuilder() {
if (typeQuotaInfoBuilder_ == null) {
typeQuotaInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder>(
typeQuotaInfo_,
((bitField0_ & 0x00000001) == 0x00000001),
getParentForChildren(),
isClean());
typeQuotaInfo_ = null;
}
return typeQuotaInfoBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageTypeQuotaInfosProto)
}
static {
defaultInstance = new StorageTypeQuotaInfosProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageTypeQuotaInfosProto)
}
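// Usage sketch (illustrative, not generated output): populating the repeated
// typeQuotaInfo field, either with prebuilt messages via addTypeQuotaInfo() or
// in place via addTypeQuotaInfoBuilder(). The quota/consumed numbers below are
// arbitrary example values.
//
//   HdfsProtos.StorageTypeQuotaInfosProto.Builder infos =
//       HdfsProtos.StorageTypeQuotaInfosProto.newBuilder();
//   infos.addTypeQuotaInfoBuilder()
//       .setType(HdfsProtos.StorageTypeProto.SSD)
//       .setQuota(10L * 1024 * 1024)
//       .setConsumed(123456L);
//   HdfsProtos.StorageTypeQuotaInfosProto built = infos.build();
//   assert built.getTypeQuotaInfoCount() == 1;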
public interface StorageTypeQuotaInfoProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.StorageTypeProto type = 1;
/**
* required .hadoop.hdfs.StorageTypeProto type = 1;
*/
boolean hasType();
/**
* required .hadoop.hdfs.StorageTypeProto type = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getType();
// required uint64 quota = 2;
/**
* required uint64 quota = 2;
*/
boolean hasQuota();
/**
* required uint64 quota = 2;
*/
long getQuota();
// required uint64 consumed = 3;
/**
* required uint64 consumed = 3;
*/
boolean hasConsumed();
/**
* required uint64 consumed = 3;
*/
long getConsumed();
}
/**
* Protobuf type {@code hadoop.hdfs.StorageTypeQuotaInfoProto}
*/
public static final class StorageTypeQuotaInfoProto extends
com.google.protobuf.GeneratedMessage
implements StorageTypeQuotaInfoProtoOrBuilder {
// Use StorageTypeQuotaInfoProto.newBuilder() to construct.
private StorageTypeQuotaInfoProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private StorageTypeQuotaInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final StorageTypeQuotaInfoProto defaultInstance;
public static StorageTypeQuotaInfoProto getDefaultInstance() {
return defaultInstance;
}
public StorageTypeQuotaInfoProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private StorageTypeQuotaInfoProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
type_ = value;
}
break;
}
case 16: {
bitField0_ |= 0x00000002;
quota_ = input.readUInt64();
break;
}
case 24: {
bitField0_ |= 0x00000004;
consumed_ = input.readUInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
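// Forward-compatibility note (added commentary, not protoc output): when case 8
// above reads an enum number this runtime does not recognize
// (StorageTypeProto.valueOf(rawValue) returns null), the raw varint is kept in
// unknownFields via mergeVarintField(1, rawValue) rather than dropped. A
// reserialized message therefore preserves values written by newer clients,
// e.g. a hypothetical storage type numbered 5.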
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder.class);
}
public static com.google.protobuf.Parser<StorageTypeQuotaInfoProto> PARSER =
new com.google.protobuf.AbstractParser<StorageTypeQuotaInfoProto>() {
public StorageTypeQuotaInfoProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new StorageTypeQuotaInfoProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<StorageTypeQuotaInfoProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.StorageTypeProto type = 1;
public static final int TYPE_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto type_;
/**
* required .hadoop.hdfs.StorageTypeProto type = 1;
*/
public boolean hasType() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.StorageTypeProto type = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getType() {
return type_;
}
// required uint64 quota = 2;
public static final int QUOTA_FIELD_NUMBER = 2;
private long quota_;
/**
* required uint64 quota = 2;
*/
public boolean hasQuota() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 quota = 2;
*/
public long getQuota() {
return quota_;
}
// required uint64 consumed = 3;
public static final int CONSUMED_FIELD_NUMBER = 3;
private long consumed_;
/**
* required uint64 consumed = 3;
*/
public boolean hasConsumed() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 consumed = 3;
*/
public long getConsumed() {
return consumed_;
}
private void initFields() {
type_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
quota_ = 0L;
consumed_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasType()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasQuota()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasConsumed()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, type_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, quota_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt64(3, consumed_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, type_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, quota_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, consumed_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto) obj;
boolean result = true;
result = result && (hasType() == other.hasType());
if (hasType()) {
result = result &&
(getType() == other.getType());
}
result = result && (hasQuota() == other.hasQuota());
if (hasQuota()) {
result = result && (getQuota()
== other.getQuota());
}
result = result && (hasConsumed() == other.hasConsumed());
if (hasConsumed()) {
result = result && (getConsumed()
== other.getConsumed());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasType()) {
hash = (37 * hash) + TYPE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getType());
}
if (hasQuota()) {
hash = (37 * hash) + QUOTA_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getQuota());
}
if (hasConsumed()) {
hash = (37 * hash) + CONSUMED_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getConsumed());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.StorageTypeQuotaInfoProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
type_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
bitField0_ = (bitField0_ & ~0x00000001);
quota_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
consumed_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
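// Translate each builder has-bit into the message's bitField0_ so the built
// message reports the same hasType()/hasQuota()/hasConsumed() flags.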
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.type_ = type_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.quota_ = quota_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.consumed_ = consumed_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.getDefaultInstance()) return this;
if (other.hasType()) {
setType(other.getType());
}
if (other.hasQuota()) {
setQuota(other.getQuota());
}
if (other.hasConsumed()) {
setConsumed(other.getConsumed());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasType()) {
return false;
}
if (!hasQuota()) {
return false;
}
if (!hasConsumed()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.StorageTypeProto type = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto type_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
/**
* required .hadoop.hdfs.StorageTypeProto type = 1;
*/
public boolean hasType() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.StorageTypeProto type = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getType() {
return type_;
}
/**
* required .hadoop.hdfs.StorageTypeProto type = 1;
*/
public Builder setType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
type_ = value;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.StorageTypeProto type = 1;
*/
public Builder clearType() {
bitField0_ = (bitField0_ & ~0x00000001);
type_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
onChanged();
return this;
}
// required uint64 quota = 2;
private long quota_ ;
/**
* required uint64 quota = 2;
*/
public boolean hasQuota() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 quota = 2;
*/
public long getQuota() {
return quota_;
}
/**
* required uint64 quota = 2;
*/
public Builder setQuota(long value) {
bitField0_ |= 0x00000002;
quota_ = value;
onChanged();
return this;
}
/**
* required uint64 quota = 2;
*/
public Builder clearQuota() {
bitField0_ = (bitField0_ & ~0x00000002);
quota_ = 0L;
onChanged();
return this;
}
// required uint64 consumed = 3;
private long consumed_ ;
/**
* required uint64 consumed = 3;
*/
public boolean hasConsumed() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 consumed = 3;
*/
public long getConsumed() {
return consumed_;
}
/**
* required uint64 consumed = 3;
*/
public Builder setConsumed(long value) {
bitField0_ |= 0x00000004;
consumed_ = value;
onChanged();
return this;
}
/**
* required uint64 consumed = 3;
*/
public Builder clearConsumed() {
bitField0_ = (bitField0_ & ~0x00000004);
consumed_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageTypeQuotaInfoProto)
}
static {
defaultInstance = new StorageTypeQuotaInfoProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageTypeQuotaInfoProto)
}
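// A minimal usage sketch for the generated message above (values are
// illustrative): set all three required fields, serialize, and parse back.
//
//   HdfsProtos.StorageTypeQuotaInfoProto info =
//       HdfsProtos.StorageTypeQuotaInfoProto.newBuilder()
//           .setType(HdfsProtos.StorageTypeProto.SSD)
//           .setQuota(1024L * 1024L * 1024L)    // quota in bytes (1 GiB)
//           .setConsumed(256L * 1024L * 1024L)  // bytes already consumed
//           .build();  // throws UninitializedMessageException if a required field is unset
//   byte[] bytes = info.toByteArray();
//   HdfsProtos.StorageTypeQuotaInfoProto parsed =
//       HdfsProtos.StorageTypeQuotaInfoProto.parseFrom(bytes);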
public interface CorruptFileBlocksProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// repeated string files = 1;
/**
* repeated string files = 1;
*/
java.util.List<java.lang.String>
getFilesList();
/**
* repeated string files = 1;
*/
int getFilesCount();
/**
* repeated string files = 1;
*/
java.lang.String getFiles(int index);
/**
* repeated string files = 1;
*/
com.google.protobuf.ByteString
getFilesBytes(int index);
// required string cookie = 2;
/**
* required string cookie = 2;
*/
boolean hasCookie();
/**
* required string cookie = 2;
*/
java.lang.String getCookie();
/**
* required string cookie = 2;
*/
com.google.protobuf.ByteString
getCookieBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.CorruptFileBlocksProto}
*
*
**
* Contains a list of paths corresponding to corrupt files and a cookie
* used for iterative calls to NameNode.listCorruptFileBlocks.
*
*/
public static final class CorruptFileBlocksProto extends
com.google.protobuf.GeneratedMessage
implements CorruptFileBlocksProtoOrBuilder {
// Use CorruptFileBlocksProto.newBuilder() to construct.
private CorruptFileBlocksProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private CorruptFileBlocksProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final CorruptFileBlocksProto defaultInstance;
public static CorruptFileBlocksProto getDefaultInstance() {
return defaultInstance;
}
public CorruptFileBlocksProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private CorruptFileBlocksProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
files_ = new com.google.protobuf.LazyStringArrayList();
mutable_bitField0_ |= 0x00000001;
}
files_.add(input.readBytes());
break;
}
case 18: {
bitField0_ |= 0x00000001;
cookie_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
files_ = new com.google.protobuf.UnmodifiableLazyStringList(files_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.Builder.class);
}
public static com.google.protobuf.Parser<CorruptFileBlocksProto> PARSER =
new com.google.protobuf.AbstractParser<CorruptFileBlocksProto>() {
public CorruptFileBlocksProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new CorruptFileBlocksProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<CorruptFileBlocksProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// repeated string files = 1;
public static final int FILES_FIELD_NUMBER = 1;
private com.google.protobuf.LazyStringList files_;
/**
* repeated string files = 1;
*/
public java.util.List<java.lang.String>
getFilesList() {
return files_;
}
/**
* repeated string files = 1;
*/
public int getFilesCount() {
return files_.size();
}
/**
* repeated string files = 1;
*/
public java.lang.String getFiles(int index) {
return files_.get(index);
}
/**
* repeated string files = 1;
*/
public com.google.protobuf.ByteString
getFilesBytes(int index) {
return files_.getByteString(index);
}
// required string cookie = 2;
public static final int COOKIE_FIELD_NUMBER = 2;
private java.lang.Object cookie_;
/**
* required string cookie = 2;
*/
public boolean hasCookie() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string cookie = 2;
*/
public java.lang.String getCookie() {
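// cookie_ is held as either a String or a UTF-8 ByteString; decode lazily and
// cache the decoded String only when the bytes are valid UTF-8.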
java.lang.Object ref = cookie_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
cookie_ = s;
}
return s;
}
}
/**
* required string cookie = 2;
*/
public com.google.protobuf.ByteString
getCookieBytes() {
java.lang.Object ref = cookie_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
cookie_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
files_ = com.google.protobuf.LazyStringArrayList.EMPTY;
cookie_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasCookie()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < files_.size(); i++) {
output.writeBytes(1, files_.getByteString(i));
}
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(2, getCookieBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
{
int dataSize = 0;
for (int i = 0; i < files_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeBytesSizeNoTag(files_.getByteString(i));
}
size += dataSize;
size += 1 * getFilesList().size();
}
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getCookieBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto) obj;
boolean result = true;
result = result && getFilesList()
.equals(other.getFilesList());
result = result && (hasCookie() == other.hasCookie());
if (hasCookie()) {
result = result && getCookie()
.equals(other.getCookie());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (getFilesCount() > 0) {
hash = (37 * hash) + FILES_FIELD_NUMBER;
hash = (53 * hash) + getFilesList().hashCode();
}
if (hasCookie()) {
hash = (37 * hash) + COOKIE_FIELD_NUMBER;
hash = (53 * hash) + getCookie().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.CorruptFileBlocksProto}
*
*
**
* Contains a list of paths corresponding to corrupt files and a cookie
* used for iterative calls to NameNode.listCorruptFileBlocks.
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
files_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000001);
cookie_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
files_ = new com.google.protobuf.UnmodifiableLazyStringList(
files_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.files_ = files_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000001;
}
result.cookie_ = cookie_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDefaultInstance()) return this;
if (!other.files_.isEmpty()) {
if (files_.isEmpty()) {
files_ = other.files_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureFilesIsMutable();
files_.addAll(other.files_);
}
onChanged();
}
if (other.hasCookie()) {
bitField0_ |= 0x00000002;
cookie_ = other.cookie_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasCookie()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// repeated string files = 1;
private com.google.protobuf.LazyStringList files_ = com.google.protobuf.LazyStringArrayList.EMPTY;
private void ensureFilesIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
files_ = new com.google.protobuf.LazyStringArrayList(files_);
bitField0_ |= 0x00000001;
}
}
/**
* repeated string files = 1;
*/
public java.util.List<java.lang.String>
getFilesList() {
return java.util.Collections.unmodifiableList(files_);
}
/**
* repeated string files = 1;
*/
public int getFilesCount() {
return files_.size();
}
/**
* repeated string files = 1;
*/
public java.lang.String getFiles(int index) {
return files_.get(index);
}
/**
* repeated string files = 1;
*/
public com.google.protobuf.ByteString
getFilesBytes(int index) {
return files_.getByteString(index);
}
/**
* repeated string files = 1;
*/
public Builder setFiles(
int index, java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureFilesIsMutable();
files_.set(index, value);
onChanged();
return this;
}
/**
* repeated string files = 1;
*/
public Builder addFiles(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureFilesIsMutable();
files_.add(value);
onChanged();
return this;
}
/**
* repeated string files = 1;
*/
public Builder addAllFiles(
java.lang.Iterable<java.lang.String> values) {
ensureFilesIsMutable();
super.addAll(values, files_);
onChanged();
return this;
}
/**
* repeated string files = 1;
*/
public Builder clearFiles() {
files_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
return this;
}
/**
* repeated string files = 1;
*/
public Builder addFilesBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
ensureFilesIsMutable();
files_.add(value);
onChanged();
return this;
}
// required string cookie = 2;
private java.lang.Object cookie_ = "";
/**
* required string cookie = 2;
*/
public boolean hasCookie() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string cookie = 2;
*/
public java.lang.String getCookie() {
java.lang.Object ref = cookie_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
cookie_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string cookie = 2;
*/
public com.google.protobuf.ByteString
getCookieBytes() {
java.lang.Object ref = cookie_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
cookie_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string cookie = 2;
*/
public Builder setCookie(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
cookie_ = value;
onChanged();
return this;
}
/**
* required string cookie = 2;
*/
public Builder clearCookie() {
bitField0_ = (bitField0_ & ~0x00000002);
cookie_ = getDefaultInstance().getCookie();
onChanged();
return this;
}
/**
* required string cookie = 2;
*/
public Builder setCookieBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
cookie_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.CorruptFileBlocksProto)
}
static {
defaultInstance = new CorruptFileBlocksProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.CorruptFileBlocksProto)
}
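// A usage sketch for CorruptFileBlocksProto (paths and cookie are illustrative;
// out/in stand in for any java.io stream pair): the cookie from one page is
// what a caller hands back to NameNode.listCorruptFileBlocks for the next page.
//
//   HdfsProtos.CorruptFileBlocksProto page =
//       HdfsProtos.CorruptFileBlocksProto.newBuilder()
//           .addFiles("/data/part-00000")
//           .addFiles("/data/part-00017")
//           .setCookie("next-page-token")   // the only required field
//           .build();
//   page.writeDelimitedTo(out);             // length-prefixed framing
//   HdfsProtos.CorruptFileBlocksProto next =
//       HdfsProtos.CorruptFileBlocksProto.parseDelimitedFrom(in);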
public interface FsPermissionProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required uint32 perm = 1;
/**
* required uint32 perm = 1;
*
*
* Actually a short - only 16 bits used
*
*/
boolean hasPerm();
/**
* required uint32 perm = 1;
*
*
* Actually a short - only 16 bits used
*
*/
int getPerm();
}
/**
* Protobuf type {@code hadoop.hdfs.FsPermissionProto}
*
*
**
* File or Directory permission - same spec as POSIX
*
*/
public static final class FsPermissionProto extends
com.google.protobuf.GeneratedMessage
implements FsPermissionProtoOrBuilder {
// Use FsPermissionProto.newBuilder() to construct.
private FsPermissionProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private FsPermissionProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final FsPermissionProto defaultInstance;
public static FsPermissionProto getDefaultInstance() {
return defaultInstance;
}
public FsPermissionProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private FsPermissionProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
perm_ = input.readUInt32();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsPermissionProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsPermissionProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder.class);
}
public static com.google.protobuf.Parser<FsPermissionProto> PARSER =
new com.google.protobuf.AbstractParser<FsPermissionProto>() {
public FsPermissionProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new FsPermissionProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<FsPermissionProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required uint32 perm = 1;
public static final int PERM_FIELD_NUMBER = 1;
private int perm_;
/**
* required uint32 perm = 1;
*
*
* Actually a short - only 16 bits used
*
*/
public boolean hasPerm() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint32 perm = 1;
*
*
* Actually a short - only 16 bits used
*
*/
public int getPerm() {
return perm_;
}
private void initFields() {
perm_ = 0;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasPerm()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt32(1, perm_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(1, perm_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto) obj;
boolean result = true;
result = result && (hasPerm() == other.hasPerm());
if (hasPerm()) {
result = result && (getPerm()
== other.getPerm());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasPerm()) {
hash = (37 * hash) + PERM_FIELD_NUMBER;
hash = (53 * hash) + getPerm();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.FsPermissionProto}
*
*
**
* File or Directory permission - same spec as POSIX
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsPermissionProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsPermissionProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
perm_ = 0;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsPermissionProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.perm_ = perm_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance()) return this;
if (other.hasPerm()) {
setPerm(other.getPerm());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasPerm()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required uint32 perm = 1;
private int perm_ ;
/**
* required uint32 perm = 1;
*
*
* Actually a short - only 16 bits used
*
*/
public boolean hasPerm() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint32 perm = 1;
*
*
* Actually a short - only 16 bits used
*
*/
public int getPerm() {
return perm_;
}
/**
* required uint32 perm = 1;
*
*
* Actually a short - only 16 bits used
*
*/
public Builder setPerm(int value) {
bitField0_ |= 0x00000001;
perm_ = value;
onChanged();
return this;
}
/**
* required uint32 perm = 1;
*
*
* Actually a short - only 16 bits used
*
*/
public Builder clearPerm() {
bitField0_ = (bitField0_ & ~0x00000001);
perm_ = 0;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.FsPermissionProto)
}
static {
defaultInstance = new FsPermissionProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.FsPermissionProto)
}
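// A usage sketch for FsPermissionProto: perm carries POSIX mode bits in the
// low 16 bits of a uint32, so an octal literal is the natural way to set it
// (0755 here is illustrative).
//
//   HdfsProtos.FsPermissionProto perm =
//       HdfsProtos.FsPermissionProto.newBuilder()
//           .setPerm(0755)                  // octal: rwxr-xr-x
//           .build();
//   int mode = perm.getPerm() & 0777;       // mask down to the permission bits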
public interface StorageTypesProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getStorageTypesList();
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
*/
int getStorageTypesCount();
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index);
}
/**
* Protobuf type {@code hadoop.hdfs.StorageTypesProto}
*
*
**
* A list of storage types.
*
*/
public static final class StorageTypesProto extends
com.google.protobuf.GeneratedMessage
implements StorageTypesProtoOrBuilder {
// Use StorageTypesProto.newBuilder() to construct.
private StorageTypesProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private StorageTypesProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final StorageTypesProto defaultInstance;
public static StorageTypesProto getDefaultInstance() {
return defaultInstance;
}
public StorageTypesProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private StorageTypesProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
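// tag 8 = field 1, wire type 0 (varint): a single unpacked enum value;
// numbers not in StorageTypeProto are kept in unknownFields.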
case 8: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
storageTypes_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>();
mutable_bitField0_ |= 0x00000001;
}
storageTypes_.add(value);
}
break;
}
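// tag 10 = field 1, wire type 2 (length-delimited): a packed run of varint
// enum values, read until the pushed length limit is exhausted.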
case 10: {
int length = input.readRawVarint32();
int oldLimit = input.pushLimit(length);
while(input.getBytesUntilLimit() > 0) {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
storageTypes_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>();
mutable_bitField0_ |= 0x00000001;
}
storageTypes_.add(value);
}
}
input.popLimit(oldLimit);
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
storageTypes_ = java.util.Collections.unmodifiableList(storageTypes_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder.class);
}
public static com.google.protobuf.Parser<StorageTypesProto> PARSER =
new com.google.protobuf.AbstractParser<StorageTypesProto>() {
public StorageTypesProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new StorageTypesProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<StorageTypesProto> getParserForType() {
return PARSER;
}
// repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
public static final int STORAGETYPES_FIELD_NUMBER = 1;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> storageTypes_;
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getStorageTypesList() {
return storageTypes_;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
*/
public int getStorageTypesCount() {
return storageTypes_.size();
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) {
return storageTypes_.get(index);
}
private void initFields() {
storageTypes_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < storageTypes_.size(); i++) {
output.writeEnum(1, storageTypes_.get(i).getNumber());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
{
int dataSize = 0;
for (int i = 0; i < storageTypes_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeEnumSizeNoTag(storageTypes_.get(i).getNumber());
}
size += dataSize;
size += 1 * storageTypes_.size();
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto) obj;
boolean result = true;
result = result && getStorageTypesList()
.equals(other.getStorageTypesList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (getStorageTypesCount() > 0) {
hash = (37 * hash) + STORAGETYPES_FIELD_NUMBER;
hash = (53 * hash) + hashEnumList(getStorageTypesList());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.StorageTypesProto}
*
*
**
* A list of storage types.
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
storageTypes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto(this);
int from_bitField0_ = bitField0_;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
storageTypes_ = java.util.Collections.unmodifiableList(storageTypes_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.storageTypes_ = storageTypes_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance()) return this;
if (!other.storageTypes_.isEmpty()) {
if (storageTypes_.isEmpty()) {
storageTypes_ = other.storageTypes_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureStorageTypesIsMutable();
storageTypes_.addAll(other.storageTypes_);
}
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> storageTypes_ =
java.util.Collections.emptyList();
private void ensureStorageTypesIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
storageTypes_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(storageTypes_);
bitField0_ |= 0x00000001;
}
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getStorageTypesList() {
return java.util.Collections.unmodifiableList(storageTypes_);
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
*/
public int getStorageTypesCount() {
return storageTypes_.size();
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) {
return storageTypes_.get(index);
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
*/
public Builder setStorageTypes(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
ensureStorageTypesIsMutable();
storageTypes_.set(index, value);
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
*/
public Builder addStorageTypes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
ensureStorageTypesIsMutable();
storageTypes_.add(value);
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
*/
public Builder addAllStorageTypes(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> values) {
ensureStorageTypesIsMutable();
super.addAll(values, storageTypes_);
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
*/
public Builder clearStorageTypes() {
storageTypes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageTypesProto)
}
static {
defaultInstance = new StorageTypesProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageTypesProto)
}
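// A usage sketch for StorageTypesProto: it is just an ordered list of
// StorageTypeProto values, populated via addStorageTypes/addAllStorageTypes.
//
//   HdfsProtos.StorageTypesProto types =
//       HdfsProtos.StorageTypesProto.newBuilder()
//           .addStorageTypes(HdfsProtos.StorageTypeProto.DISK)
//           .addStorageTypes(HdfsProtos.StorageTypeProto.ARCHIVE)
//           .build();  // no required fields, so always initialized
//   for (HdfsProtos.StorageTypeProto t : types.getStorageTypesList()) {
//     System.out.println(t.name() + " = " + t.getNumber());
//   }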
public interface BlockStoragePolicyProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required uint32 policyId = 1;
/**
* required uint32 policyId = 1;
*/
boolean hasPolicyId();
/**
* required uint32 policyId = 1;
*/
int getPolicyId();
// required string name = 2;
/**
* required string name = 2;
*/
boolean hasName();
/**
* required string name = 2;
*/
java.lang.String getName();
/**
* required string name = 2;
*/
com.google.protobuf.ByteString
getNameBytes();
// required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
/**
* required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
*
*
* a list of storage types for storing the block replicas when creating a
* block.
*
*/
boolean hasCreationPolicy();
/**
* required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
*
*
* a list of storage types for storing the block replicas when creating a
* block.
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationPolicy();
/**
* required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
*
*
* a list of storage types for storing the block replicas when creating a
* block.
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationPolicyOrBuilder();
// optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
/**
* optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
*
*
* A list of storage types for creation fallback storage.
*
*/
boolean hasCreationFallbackPolicy();
/**
* optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
*
*
* A list of storage types for creation fallback storage.
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationFallbackPolicy();
/**
* optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
*
*
* A list of storage types for creation fallback storage.
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationFallbackPolicyOrBuilder();
// optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
/**
* optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
*/
boolean hasReplicationFallbackPolicy();
/**
* optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getReplicationFallbackPolicy();
/**
* optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getReplicationFallbackPolicyOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.BlockStoragePolicyProto}
*
*
**
* Block replica storage policy.
*
*/
public static final class BlockStoragePolicyProto extends
com.google.protobuf.GeneratedMessage
implements BlockStoragePolicyProtoOrBuilder {
// Use BlockStoragePolicyProto.newBuilder() to construct.
private BlockStoragePolicyProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private BlockStoragePolicyProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final BlockStoragePolicyProto defaultInstance;
public static BlockStoragePolicyProto getDefaultInstance() {
return defaultInstance;
}
public BlockStoragePolicyProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private BlockStoragePolicyProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
policyId_ = input.readUInt32();
break;
}
case 18: {
bitField0_ |= 0x00000002;
name_ = input.readBytes();
break;
}
case 26: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) == 0x00000004)) {
subBuilder = creationPolicy_.toBuilder();
}
creationPolicy_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(creationPolicy_);
creationPolicy_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
case 34: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000008) == 0x00000008)) {
subBuilder = creationFallbackPolicy_.toBuilder();
}
creationFallbackPolicy_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(creationFallbackPolicy_);
creationFallbackPolicy_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000008;
break;
}
case 42: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000010) == 0x00000010)) {
subBuilder = replicationFallbackPolicy_.toBuilder();
}
replicationFallbackPolicy_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(replicationFallbackPolicy_);
replicationFallbackPolicy_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000010;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
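// Tag arithmetic for the switch above (editorial comment): a protobuf field tag is
// (fieldNumber << 3) | wireType, so the case labels decode as
//   case 8  = (1 << 3) | 0  -> field 1, varint            (policyId)
//   case 18 = (2 << 3) | 2  -> field 2, length-delimited  (name)
//   case 26 = (3 << 3) | 2  -> field 3, length-delimited  (creationPolicy)
//   case 34 = (4 << 3) | 2  -> field 4, length-delimited  (creationFallbackPolicy)
//   case 42 = (5 << 3) | 2  -> field 5, length-delimited  (replicationFallbackPolicy)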
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.Builder.class);
}
public static com.google.protobuf.Parser<BlockStoragePolicyProto> PARSER =
new com.google.protobuf.AbstractParser<BlockStoragePolicyProto>() {
public BlockStoragePolicyProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new BlockStoragePolicyProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<BlockStoragePolicyProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required uint32 policyId = 1;
public static final int POLICYID_FIELD_NUMBER = 1;
private int policyId_;
/**
* required uint32 policyId = 1;
*/
public boolean hasPolicyId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint32 policyId = 1;
*/
public int getPolicyId() {
return policyId_;
}
// required string name = 2;
public static final int NAME_FIELD_NUMBER = 2;
private java.lang.Object name_;
/**
* required string name = 2;
*/
public boolean hasName() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string name = 2;
*/
public java.lang.String getName() {
java.lang.Object ref = name_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
name_ = s;
}
return s;
}
}
/**
* required string name = 2;
*/
public com.google.protobuf.ByteString
getNameBytes() {
java.lang.Object ref = name_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
name_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
public static final int CREATIONPOLICY_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto creationPolicy_;
/**
* required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
*
*
* a list of storage types for storing the block replicas when creating a
* block.
*
*/
public boolean hasCreationPolicy() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
*
*
* a list of storage types for storing the block replicas when creating a
* block.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationPolicy() {
return creationPolicy_;
}
/**
* required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
*
*
* a list of storage types for storing the block replicas when creating a
* block.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationPolicyOrBuilder() {
return creationPolicy_;
}
// optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
public static final int CREATIONFALLBACKPOLICY_FIELD_NUMBER = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto creationFallbackPolicy_;
/**
* optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
*
*
* A list of storage types for creation fallback storage.
*
*/
public boolean hasCreationFallbackPolicy() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
*
*
* A list of storage types for creation fallback storage.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationFallbackPolicy() {
return creationFallbackPolicy_;
}
/**
* optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
*
*
* A list of storage types for creation fallback storage.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationFallbackPolicyOrBuilder() {
return creationFallbackPolicy_;
}
// optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
public static final int REPLICATIONFALLBACKPOLICY_FIELD_NUMBER = 5;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto replicationFallbackPolicy_;
/**
* optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
*/
public boolean hasReplicationFallbackPolicy() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getReplicationFallbackPolicy() {
return replicationFallbackPolicy_;
}
/**
* optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getReplicationFallbackPolicyOrBuilder() {
return replicationFallbackPolicy_;
}
private void initFields() {
policyId_ = 0;
name_ = "";
creationPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance();
creationFallbackPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance();
replicationFallbackPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasPolicyId()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasName()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasCreationPolicy()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt32(1, policyId_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getNameBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeMessage(3, creationPolicy_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeMessage(4, creationFallbackPolicy_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeMessage(5, replicationFallbackPolicy_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(1, policyId_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getNameBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, creationPolicy_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(4, creationFallbackPolicy_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(5, replicationFallbackPolicy_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto) obj;
boolean result = true;
result = result && (hasPolicyId() == other.hasPolicyId());
if (hasPolicyId()) {
result = result && (getPolicyId()
== other.getPolicyId());
}
result = result && (hasName() == other.hasName());
if (hasName()) {
result = result && getName()
.equals(other.getName());
}
result = result && (hasCreationPolicy() == other.hasCreationPolicy());
if (hasCreationPolicy()) {
result = result && getCreationPolicy()
.equals(other.getCreationPolicy());
}
result = result && (hasCreationFallbackPolicy() == other.hasCreationFallbackPolicy());
if (hasCreationFallbackPolicy()) {
result = result && getCreationFallbackPolicy()
.equals(other.getCreationFallbackPolicy());
}
result = result && (hasReplicationFallbackPolicy() == other.hasReplicationFallbackPolicy());
if (hasReplicationFallbackPolicy()) {
result = result && getReplicationFallbackPolicy()
.equals(other.getReplicationFallbackPolicy());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasPolicyId()) {
hash = (37 * hash) + POLICYID_FIELD_NUMBER;
hash = (53 * hash) + getPolicyId();
}
if (hasName()) {
hash = (37 * hash) + NAME_FIELD_NUMBER;
hash = (53 * hash) + getName().hashCode();
}
if (hasCreationPolicy()) {
hash = (37 * hash) + CREATIONPOLICY_FIELD_NUMBER;
hash = (53 * hash) + getCreationPolicy().hashCode();
}
if (hasCreationFallbackPolicy()) {
hash = (37 * hash) + CREATIONFALLBACKPOLICY_FIELD_NUMBER;
hash = (53 * hash) + getCreationFallbackPolicy().hashCode();
}
if (hasReplicationFallbackPolicy()) {
hash = (37 * hash) + REPLICATIONFALLBACKPOLICY_FIELD_NUMBER;
hash = (53 * hash) + getReplicationFallbackPolicy().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.BlockStoragePolicyProto}
*
*
**
* Block replica storage policy.
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getCreationPolicyFieldBuilder();
getCreationFallbackPolicyFieldBuilder();
getReplicationFallbackPolicyFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
policyId_ = 0;
bitField0_ = (bitField0_ & ~0x00000001);
name_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
if (creationPolicyBuilder_ == null) {
creationPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance();
} else {
creationPolicyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
if (creationFallbackPolicyBuilder_ == null) {
creationFallbackPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance();
} else {
creationFallbackPolicyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
if (replicationFallbackPolicyBuilder_ == null) {
replicationFallbackPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance();
} else {
replicationFallbackPolicyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.policyId_ = policyId_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.name_ = name_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
if (creationPolicyBuilder_ == null) {
result.creationPolicy_ = creationPolicy_;
} else {
result.creationPolicy_ = creationPolicyBuilder_.build();
}
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
if (creationFallbackPolicyBuilder_ == null) {
result.creationFallbackPolicy_ = creationFallbackPolicy_;
} else {
result.creationFallbackPolicy_ = creationFallbackPolicyBuilder_.build();
}
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
if (replicationFallbackPolicyBuilder_ == null) {
result.replicationFallbackPolicy_ = replicationFallbackPolicy_;
} else {
result.replicationFallbackPolicy_ = replicationFallbackPolicyBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.getDefaultInstance()) return this;
if (other.hasPolicyId()) {
setPolicyId(other.getPolicyId());
}
if (other.hasName()) {
bitField0_ |= 0x00000002;
name_ = other.name_;
onChanged();
}
if (other.hasCreationPolicy()) {
mergeCreationPolicy(other.getCreationPolicy());
}
if (other.hasCreationFallbackPolicy()) {
mergeCreationFallbackPolicy(other.getCreationFallbackPolicy());
}
if (other.hasReplicationFallbackPolicy()) {
mergeReplicationFallbackPolicy(other.getReplicationFallbackPolicy());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasPolicyId()) {
return false;
}
if (!hasName()) {
return false;
}
if (!hasCreationPolicy()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required uint32 policyId = 1;
private int policyId_ ;
/**
* required uint32 policyId = 1;
*/
public boolean hasPolicyId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint32 policyId = 1;
*/
public int getPolicyId() {
return policyId_;
}
/**
* required uint32 policyId = 1;
*/
public Builder setPolicyId(int value) {
bitField0_ |= 0x00000001;
policyId_ = value;
onChanged();
return this;
}
/**
* required uint32 policyId = 1;
*/
public Builder clearPolicyId() {
bitField0_ = (bitField0_ & ~0x00000001);
policyId_ = 0;
onChanged();
return this;
}
// required string name = 2;
private java.lang.Object name_ = "";
/**
* required string name = 2;
*/
public boolean hasName() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string name = 2;
*/
public java.lang.String getName() {
java.lang.Object ref = name_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
name_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string name = 2;
*/
public com.google.protobuf.ByteString
getNameBytes() {
java.lang.Object ref = name_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
name_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string name = 2;
*/
public Builder setName(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
name_ = value;
onChanged();
return this;
}
/**
* required string name = 2;
*/
public Builder clearName() {
bitField0_ = (bitField0_ & ~0x00000002);
name_ = getDefaultInstance().getName();
onChanged();
return this;
}
/**
* required string name = 2;
*/
public Builder setNameBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
name_ = value;
onChanged();
return this;
}
// required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto creationPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> creationPolicyBuilder_;
/**
* required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
*
*
* a list of storage types for storing the block replicas when creating a
* block.
*
*/
public boolean hasCreationPolicy() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
*
*
* a list of storage types for storing the block replicas when creating a
* block.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationPolicy() {
if (creationPolicyBuilder_ == null) {
return creationPolicy_;
} else {
return creationPolicyBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
*
*
* a list of storage types for storing the block replicas when creating a
* block.
*
*/
public Builder setCreationPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) {
if (creationPolicyBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
creationPolicy_ = value;
onChanged();
} else {
creationPolicyBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
*
*
* a list of storage types for storing the block replicas when creating a
* block.
*
*/
public Builder setCreationPolicy(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder builderForValue) {
if (creationPolicyBuilder_ == null) {
creationPolicy_ = builderForValue.build();
onChanged();
} else {
creationPolicyBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
return this;
}
/**
* required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
*
*
* a list of storage types for storing the block replicas when creating a
* block.
*
*/
public Builder mergeCreationPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) {
if (creationPolicyBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004) &&
creationPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance()) {
creationPolicy_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.newBuilder(creationPolicy_).mergeFrom(value).buildPartial();
} else {
creationPolicy_ = value;
}
onChanged();
} else {
creationPolicyBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
*
*
* a list of storage types for storing the block replicas when creating a
* block.
*
*/
public Builder clearCreationPolicy() {
if (creationPolicyBuilder_ == null) {
creationPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance();
onChanged();
} else {
creationPolicyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
/**
* required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
*
*
* a list of storage types for storing the block replicas when creating a
* block.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder getCreationPolicyBuilder() {
bitField0_ |= 0x00000004;
onChanged();
return getCreationPolicyFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
*
*
* a list of storage types for storing the block replicas when creating a
* block.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationPolicyOrBuilder() {
if (creationPolicyBuilder_ != null) {
return creationPolicyBuilder_.getMessageOrBuilder();
} else {
return creationPolicy_;
}
}
/**
* required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
*
*
* a list of storage types for storing the block replicas when creating a
* block.
*
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>
getCreationPolicyFieldBuilder() {
if (creationPolicyBuilder_ == null) {
creationPolicyBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>(
creationPolicy_,
getParentForChildren(),
isClean());
creationPolicy_ = null;
}
return creationPolicyBuilder_;
}
// optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto creationFallbackPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> creationFallbackPolicyBuilder_;
/**
* optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
*
*
* A list of storage types for creation fallback storage.
*
*/
public boolean hasCreationFallbackPolicy() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
*
*
* A list of storage types for creation fallback storage.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationFallbackPolicy() {
if (creationFallbackPolicyBuilder_ == null) {
return creationFallbackPolicy_;
} else {
return creationFallbackPolicyBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
*
*
* A list of storage types for creation fallback storage.
*
*/
public Builder setCreationFallbackPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) {
if (creationFallbackPolicyBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
creationFallbackPolicy_ = value;
onChanged();
} else {
creationFallbackPolicyBuilder_.setMessage(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
*
*
* A list of storage types for creation fallback storage.
*
*/
public Builder setCreationFallbackPolicy(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder builderForValue) {
if (creationFallbackPolicyBuilder_ == null) {
creationFallbackPolicy_ = builderForValue.build();
onChanged();
} else {
creationFallbackPolicyBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
*
*
* A list of storage types for creation fallback storage.
*
*/
public Builder mergeCreationFallbackPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) {
if (creationFallbackPolicyBuilder_ == null) {
if (((bitField0_ & 0x00000008) == 0x00000008) &&
creationFallbackPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance()) {
creationFallbackPolicy_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.newBuilder(creationFallbackPolicy_).mergeFrom(value).buildPartial();
} else {
creationFallbackPolicy_ = value;
}
onChanged();
} else {
creationFallbackPolicyBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
*
*
* A list of storage types for creation fallback storage.
*
*/
public Builder clearCreationFallbackPolicy() {
if (creationFallbackPolicyBuilder_ == null) {
creationFallbackPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance();
onChanged();
} else {
creationFallbackPolicyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
/**
* optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
*
*
* A list of storage types for creation fallback storage.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder getCreationFallbackPolicyBuilder() {
bitField0_ |= 0x00000008;
onChanged();
return getCreationFallbackPolicyFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
*
*
* A list of storage types for creation fallback storage.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationFallbackPolicyOrBuilder() {
if (creationFallbackPolicyBuilder_ != null) {
return creationFallbackPolicyBuilder_.getMessageOrBuilder();
} else {
return creationFallbackPolicy_;
}
}
/**
* optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
*
*
* A list of storage types for creation fallback storage.
*
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>
getCreationFallbackPolicyFieldBuilder() {
if (creationFallbackPolicyBuilder_ == null) {
creationFallbackPolicyBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>(
creationFallbackPolicy_,
getParentForChildren(),
isClean());
creationFallbackPolicy_ = null;
}
return creationFallbackPolicyBuilder_;
}
// optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto replicationFallbackPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> replicationFallbackPolicyBuilder_;
/**
* optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
*/
public boolean hasReplicationFallbackPolicy() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getReplicationFallbackPolicy() {
if (replicationFallbackPolicyBuilder_ == null) {
return replicationFallbackPolicy_;
} else {
return replicationFallbackPolicyBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
*/
public Builder setReplicationFallbackPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) {
if (replicationFallbackPolicyBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
replicationFallbackPolicy_ = value;
onChanged();
} else {
replicationFallbackPolicyBuilder_.setMessage(value);
}
bitField0_ |= 0x00000010;
return this;
}
/**
* optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
*/
public Builder setReplicationFallbackPolicy(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder builderForValue) {
if (replicationFallbackPolicyBuilder_ == null) {
replicationFallbackPolicy_ = builderForValue.build();
onChanged();
} else {
replicationFallbackPolicyBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000010;
return this;
}
/**
* optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
*/
public Builder mergeReplicationFallbackPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) {
if (replicationFallbackPolicyBuilder_ == null) {
if (((bitField0_ & 0x00000010) == 0x00000010) &&
replicationFallbackPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance()) {
replicationFallbackPolicy_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.newBuilder(replicationFallbackPolicy_).mergeFrom(value).buildPartial();
} else {
replicationFallbackPolicy_ = value;
}
onChanged();
} else {
replicationFallbackPolicyBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000010;
return this;
}
/**
* optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
*/
public Builder clearReplicationFallbackPolicy() {
if (replicationFallbackPolicyBuilder_ == null) {
replicationFallbackPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance();
onChanged();
} else {
replicationFallbackPolicyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
/**
* optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder getReplicationFallbackPolicyBuilder() {
bitField0_ |= 0x00000010;
onChanged();
return getReplicationFallbackPolicyFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getReplicationFallbackPolicyOrBuilder() {
if (replicationFallbackPolicyBuilder_ != null) {
return replicationFallbackPolicyBuilder_.getMessageOrBuilder();
} else {
return replicationFallbackPolicy_;
}
}
/**
* optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>
getReplicationFallbackPolicyFieldBuilder() {
if (replicationFallbackPolicyBuilder_ == null) {
replicationFallbackPolicyBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>(
replicationFallbackPolicy_,
getParentForChildren(),
isClean());
replicationFallbackPolicy_ = null;
}
return replicationFallbackPolicyBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlockStoragePolicyProto)
}
static {
defaultInstance = new BlockStoragePolicyProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.BlockStoragePolicyProto)
}
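// Usage sketch (editorial comment; the id and name are made up, not one of HDFS's
// built-in policies): policyId, name and creationPolicy are required, so build()
// throws an UninitializedMessageException when any of them is unset, while the two
// fallback policies may be omitted.
//
//   HdfsProtos.BlockStoragePolicyProto policy =
//       HdfsProtos.BlockStoragePolicyProto.newBuilder()
//           .setPolicyId(7)
//           .setName("EXAMPLE")
//           .setCreationPolicy(
//               HdfsProtos.StorageTypesProto.newBuilder()
//                   .addStorageTypes(HdfsProtos.StorageTypeProto.DISK))
//           .build();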
public interface StorageUuidsProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// repeated string storageUuids = 1;
/**
* repeated string storageUuids = 1;
*/
java.util.List<java.lang.String>
getStorageUuidsList();
/**
* repeated string storageUuids = 1;
*/
int getStorageUuidsCount();
/**
* repeated string storageUuids = 1;
*/
java.lang.String getStorageUuids(int index);
/**
* repeated string storageUuids = 1;
*/
com.google.protobuf.ByteString
getStorageUuidsBytes(int index);
}
/**
* Protobuf type {@code hadoop.hdfs.StorageUuidsProto}
*
*
**
* A list of storage IDs.
*
*/
public static final class StorageUuidsProto extends
com.google.protobuf.GeneratedMessage
implements StorageUuidsProtoOrBuilder {
// Use StorageUuidsProto.newBuilder() to construct.
private StorageUuidsProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private StorageUuidsProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final StorageUuidsProto defaultInstance;
public static StorageUuidsProto getDefaultInstance() {
return defaultInstance;
}
public StorageUuidsProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private StorageUuidsProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
storageUuids_ = new com.google.protobuf.LazyStringArrayList();
mutable_bitField0_ |= 0x00000001;
}
storageUuids_.add(input.readBytes());
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
storageUuids_ = new com.google.protobuf.UnmodifiableLazyStringList(storageUuids_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageUuidsProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageUuidsProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.Builder.class);
}
public static com.google.protobuf.Parser<StorageUuidsProto> PARSER =
new com.google.protobuf.AbstractParser<StorageUuidsProto>() {
public StorageUuidsProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new StorageUuidsProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<StorageUuidsProto> getParserForType() {
return PARSER;
}
// repeated string storageUuids = 1;
public static final int STORAGEUUIDS_FIELD_NUMBER = 1;
private com.google.protobuf.LazyStringList storageUuids_;
/**
* repeated string storageUuids = 1;
*/
public java.util.List<java.lang.String>
getStorageUuidsList() {
return storageUuids_;
}
/**
* repeated string storageUuids = 1;
*/
public int getStorageUuidsCount() {
return storageUuids_.size();
}
/**
* repeated string storageUuids = 1;
*/
public java.lang.String getStorageUuids(int index) {
return storageUuids_.get(index);
}
/**
* repeated string storageUuids = 1;
*/
public com.google.protobuf.ByteString
getStorageUuidsBytes(int index) {
return storageUuids_.getByteString(index);
}
private void initFields() {
storageUuids_ = com.google.protobuf.LazyStringArrayList.EMPTY;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < storageUuids_.size(); i++) {
output.writeBytes(1, storageUuids_.getByteString(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
{
int dataSize = 0;
for (int i = 0; i < storageUuids_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeBytesSizeNoTag(storageUuids_.getByteString(i));
}
size += dataSize;
size += 1 * getStorageUuidsList().size();
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
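// Size note (editorial comment): each repeated string element is encoded as a one-byte
// tag (10 = (1 << 3) | 2, matching "case 10" in the parsing constructor) followed by a
// length-prefixed payload, which is why the loop sums computeBytesSizeNoTag() and then
// adds 1 * getStorageUuidsList().size() for the per-element tag overhead.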
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto) obj;
boolean result = true;
result = result && getStorageUuidsList()
.equals(other.getStorageUuidsList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (getStorageUuidsCount() > 0) {
hash = (37 * hash) + STORAGEUUIDS_FIELD_NUMBER;
hash = (53 * hash) + getStorageUuidsList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.StorageUuidsProto}
*
*
**
* A list of storage IDs.
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageUuidsProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageUuidsProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
storageUuids_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageUuidsProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto(this);
int from_bitField0_ = bitField0_;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
storageUuids_ = new com.google.protobuf.UnmodifiableLazyStringList(
storageUuids_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.storageUuids_ = storageUuids_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.getDefaultInstance()) return this;
if (!other.storageUuids_.isEmpty()) {
if (storageUuids_.isEmpty()) {
storageUuids_ = other.storageUuids_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureStorageUuidsIsMutable();
storageUuids_.addAll(other.storageUuids_);
}
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// repeated string storageUuids = 1;
private com.google.protobuf.LazyStringList storageUuids_ = com.google.protobuf.LazyStringArrayList.EMPTY;
private void ensureStorageUuidsIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
storageUuids_ = new com.google.protobuf.LazyStringArrayList(storageUuids_);
bitField0_ |= 0x00000001;
}
}
/**
* repeated string storageUuids = 1;
*/
public java.util.List<java.lang.String>
getStorageUuidsList() {
return java.util.Collections.unmodifiableList(storageUuids_);
}
/**
* repeated string storageUuids = 1;
*/
public int getStorageUuidsCount() {
return storageUuids_.size();
}
/**
* repeated string storageUuids = 1;
*/
public java.lang.String getStorageUuids(int index) {
return storageUuids_.get(index);
}
/**
* repeated string storageUuids = 1;
*/
public com.google.protobuf.ByteString
getStorageUuidsBytes(int index) {
return storageUuids_.getByteString(index);
}
/**
* repeated string storageUuids = 1;
*/
public Builder setStorageUuids(
int index, java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureStorageUuidsIsMutable();
storageUuids_.set(index, value);
onChanged();
return this;
}
/**
* repeated string storageUuids = 1;
*/
public Builder addStorageUuids(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureStorageUuidsIsMutable();
storageUuids_.add(value);
onChanged();
return this;
}
/**
* repeated string storageUuids = 1;
*/
public Builder addAllStorageUuids(
java.lang.Iterable<java.lang.String> values) {
ensureStorageUuidsIsMutable();
super.addAll(values, storageUuids_);
onChanged();
return this;
}
/**
* repeated string storageUuids = 1;
*/
public Builder clearStorageUuids() {
storageUuids_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
return this;
}
/**
* repeated string storageUuids = 1;
*/
public Builder addStorageUuidsBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
ensureStorageUuidsIsMutable();
storageUuids_.add(value);
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageUuidsProto)
}
static {
defaultInstance = new StorageUuidsProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageUuidsProto)
}
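// Sketch (not generated code): StorageUuidsProto is a plain list-of-strings
// message, so a full round trip is builder -> bytes -> parse. The UUID
// literals are invented for the example.
//
//   StorageUuidsProto uuids = StorageUuidsProto.newBuilder()
//       .addStorageUuids("DS-af53e2a1")
//       .addStorageUuids("DS-0b21c7f9")
//       .build();
//   byte[] bytes = uuids.toByteArray();
//   StorageUuidsProto parsed = StorageUuidsProto.parseFrom(bytes);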
public interface LocatedBlockProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.ExtendedBlockProto b = 1;
/**
* required .hadoop.hdfs.ExtendedBlockProto b = 1;
*/
boolean hasB();
/**
* required .hadoop.hdfs.ExtendedBlockProto b = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB();
/**
* required .hadoop.hdfs.ExtendedBlockProto b = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder();
// required uint64 offset = 2;
/**
* required uint64 offset = 2;
*
*
* offset of first byte of block in the file
*
*/
boolean hasOffset();
/**
* required uint64 offset = 2;
*
*
* offset of first byte of block in the file
*
*/
long getOffset();
// repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>
getLocsList();
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getLocs(int index);
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
int getLocsCount();
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getLocsOrBuilderList();
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getLocsOrBuilder(
int index);
// required bool corrupt = 4;
/**
* required bool corrupt = 4;
*
*
* true if all replicas of a block are corrupt, else false
*
*/
boolean hasCorrupt();
/**
* required bool corrupt = 4;
*
*
* true if all replicas of a block are corrupt, else false
*
*/
boolean getCorrupt();
// required .hadoop.common.TokenProto blockToken = 5;
/**
* required .hadoop.common.TokenProto blockToken = 5;
*/
boolean hasBlockToken();
/**
* required .hadoop.common.TokenProto blockToken = 5;
*/
org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockToken();
/**
* required .hadoop.common.TokenProto blockToken = 5;
*/
org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokenOrBuilder();
// repeated bool isCached = 6 [packed = true];
/**
* repeated bool isCached = 6 [packed = true];
*
*
* if a location in locs is cached
*
*/
java.util.List<java.lang.Boolean> getIsCachedList();
/**
* repeated bool isCached = 6 [packed = true];
*
*
* if a location in locs is cached
*
*/
int getIsCachedCount();
/**
* repeated bool isCached = 6 [packed = true];
*
*
* if a location in locs is cached
*
*/
boolean getIsCached(int index);
// repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getStorageTypesList();
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
*/
int getStorageTypesCount();
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index);
// repeated string storageIDs = 8;
/**
* repeated string storageIDs = 8;
*/
java.util.List<java.lang.String>
getStorageIDsList();
/**
* repeated string storageIDs = 8;
*/
int getStorageIDsCount();
/**
* repeated string storageIDs = 8;
*/
java.lang.String getStorageIDs(int index);
/**
* repeated string storageIDs = 8;
*/
com.google.protobuf.ByteString
getStorageIDsBytes(int index);
}
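// Note (not generated code): each message pairs with an *OrBuilder interface
// like the one above. Both LocatedBlockProto and its Builder implement it, so
// read-only helpers can accept either without forcing an intermediate build().
// A sketch, assuming ExtendedBlockProto's optional numBytes field:
//
//   static long endOffset(LocatedBlockProtoOrBuilder lb) {
//     return lb.getOffset() + lb.getB().getNumBytes();
//   }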
/**
* Protobuf type {@code hadoop.hdfs.LocatedBlockProto}
*
* <pre>
* A LocatedBlock gives information about a block and its location.
* </pre>
*/
public static final class LocatedBlockProto extends
com.google.protobuf.GeneratedMessage
implements LocatedBlockProtoOrBuilder {
// Use LocatedBlockProto.newBuilder() to construct.
private LocatedBlockProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private LocatedBlockProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final LocatedBlockProto defaultInstance;
public static LocatedBlockProto getDefaultInstance() {
return defaultInstance;
}
public LocatedBlockProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private LocatedBlockProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = b_.toBuilder();
}
b_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(b_);
b_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 16: {
bitField0_ |= 0x00000002;
offset_ = input.readUInt64();
break;
}
case 26: {
if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
locs_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>();
mutable_bitField0_ |= 0x00000004;
}
locs_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.PARSER, extensionRegistry));
break;
}
case 32: {
bitField0_ |= 0x00000004;
corrupt_ = input.readBool();
break;
}
case 42: {
org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000008) == 0x00000008)) {
subBuilder = blockToken_.toBuilder();
}
blockToken_ = input.readMessage(org.apache.hadoop.security.proto.SecurityProtos.TokenProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(blockToken_);
blockToken_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000008;
break;
}
case 48: {
if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
isCached_ = new java.util.ArrayList<java.lang.Boolean>();
mutable_bitField0_ |= 0x00000020;
}
isCached_.add(input.readBool());
break;
}
case 50: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
if (!((mutable_bitField0_ & 0x00000020) == 0x00000020) && input.getBytesUntilLimit() > 0) {
isCached_ = new java.util.ArrayList<java.lang.Boolean>();
mutable_bitField0_ |= 0x00000020;
}
while (input.getBytesUntilLimit() > 0) {
isCached_.add(input.readBool());
}
input.popLimit(limit);
break;
}
case 56: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(7, rawValue);
} else {
if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
storageTypes_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>();
mutable_bitField0_ |= 0x00000040;
}
storageTypes_.add(value);
}
break;
}
case 58: {
int length = input.readRawVarint32();
int oldLimit = input.pushLimit(length);
while(input.getBytesUntilLimit() > 0) {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(7, rawValue);
} else {
if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
storageTypes_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>();
mutable_bitField0_ |= 0x00000040;
}
storageTypes_.add(value);
}
}
input.popLimit(oldLimit);
break;
}
case 66: {
if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) {
storageIDs_ = new com.google.protobuf.LazyStringArrayList();
mutable_bitField0_ |= 0x00000080;
}
storageIDs_.add(input.readBytes());
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
locs_ = java.util.Collections.unmodifiableList(locs_);
}
if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
isCached_ = java.util.Collections.unmodifiableList(isCached_);
}
if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
storageTypes_ = java.util.Collections.unmodifiableList(storageTypes_);
}
if (((mutable_bitField0_ & 0x00000080) == 0x00000080)) {
storageIDs_ = new com.google.protobuf.UnmodifiableLazyStringList(storageIDs_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
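// The switch above dispatches on the raw wire tag, (fieldNumber << 3) | wireType:
// field 1 (b, length-delimited, type 2) is case 10 = 1<<3|2, field 2 (offset,
// varint) is case 16, field 3 (locs) is case 26, and field 6 (isCached) appears
// twice because parsers must accept both encodings: case 48 = 6<<3|0 for a
// single unpacked bool and case 50 = 6<<3|2 for a packed, length-delimited run.
// The same pairing covers the storageTypes enum at cases 56 and 58.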
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder.class);
}
public static com.google.protobuf.Parser<LocatedBlockProto> PARSER =
new com.google.protobuf.AbstractParser<LocatedBlockProto>() {
public LocatedBlockProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new LocatedBlockProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<LocatedBlockProto> getParserForType() {
return PARSER;
}
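// Sketch (not generated code): PARSER is the stream-parsing entry point; the
// static parseFrom(...) overloads defined later in this class delegate to it.
//
//   LocatedBlockProto block = LocatedBlockProto.PARSER.parseFrom(bytes);
//   LocatedBlockProto same  = LocatedBlockProto.parseFrom(bytes);  // equivalent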
private int bitField0_;
// required .hadoop.hdfs.ExtendedBlockProto b = 1;
public static final int B_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto b_;
/**
* required .hadoop.hdfs.ExtendedBlockProto b = 1;
*/
public boolean hasB() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ExtendedBlockProto b = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB() {
return b_;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto b = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder() {
return b_;
}
// required uint64 offset = 2;
public static final int OFFSET_FIELD_NUMBER = 2;
private long offset_;
/**
* required uint64 offset = 2;
*
*
* offset of first byte of block in the file
*
*/
public boolean hasOffset() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 offset = 2;
*
*
* offset of first byte of block in the file
*
*/
public long getOffset() {
return offset_;
}
// repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
public static final int LOCS_FIELD_NUMBER = 3;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> locs_;
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getLocsList() {
return locs_;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getLocsOrBuilderList() {
return locs_;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public int getLocsCount() {
return locs_.size();
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getLocs(int index) {
return locs_.get(index);
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getLocsOrBuilder(
int index) {
return locs_.get(index);
}
// required bool corrupt = 4;
public static final int CORRUPT_FIELD_NUMBER = 4;
private boolean corrupt_;
/**
* required bool corrupt = 4;
*
*
* true if all replicas of a block are corrupt, else false
*
*/
public boolean hasCorrupt() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required bool corrupt = 4;
*
*
* true if all replicas of a block are corrupt, else false
*
*/
public boolean getCorrupt() {
return corrupt_;
}
// required .hadoop.common.TokenProto blockToken = 5;
public static final int BLOCKTOKEN_FIELD_NUMBER = 5;
private org.apache.hadoop.security.proto.SecurityProtos.TokenProto blockToken_;
/**
* required .hadoop.common.TokenProto blockToken = 5;
*/
public boolean hasBlockToken() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required .hadoop.common.TokenProto blockToken = 5;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockToken() {
return blockToken_;
}
/**
* required .hadoop.common.TokenProto blockToken = 5;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokenOrBuilder() {
return blockToken_;
}
// repeated bool isCached = 6 [packed = true];
public static final int ISCACHED_FIELD_NUMBER = 6;
private java.util.List<java.lang.Boolean> isCached_;
/**
* repeated bool isCached = 6 [packed = true];
*
*
* if a location in locs is cached
*
*/
public java.util.List<java.lang.Boolean>
getIsCachedList() {
return isCached_;
}
/**
* repeated bool isCached = 6 [packed = true];
*
*
* if a location in locs is cached
*
*/
public int getIsCachedCount() {
return isCached_.size();
}
/**
* repeated bool isCached = 6 [packed = true];
*
*
* if a location in locs is cached
*
*/
public boolean getIsCached(int index) {
return isCached_.get(index);
}
private int isCachedMemoizedSerializedSize = -1;
// repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
public static final int STORAGETYPES_FIELD_NUMBER = 7;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> storageTypes_;
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getStorageTypesList() {
return storageTypes_;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
*/
public int getStorageTypesCount() {
return storageTypes_.size();
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) {
return storageTypes_.get(index);
}
// repeated string storageIDs = 8;
public static final int STORAGEIDS_FIELD_NUMBER = 8;
private com.google.protobuf.LazyStringList storageIDs_;
/**
* repeated string storageIDs = 8;
*/
public java.util.List<java.lang.String>
getStorageIDsList() {
return storageIDs_;
}
/**
* repeated string storageIDs = 8;
*/
public int getStorageIDsCount() {
return storageIDs_.size();
}
/**
* repeated string storageIDs = 8;
*/
public java.lang.String getStorageIDs(int index) {
return storageIDs_.get(index);
}
/**
* repeated string storageIDs = 8;
*/
public com.google.protobuf.ByteString
getStorageIDsBytes(int index) {
return storageIDs_.getByteString(index);
}
private void initFields() {
b_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
offset_ = 0L;
locs_ = java.util.Collections.emptyList();
corrupt_ = false;
blockToken_ = org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance();
isCached_ = java.util.Collections.emptyList();
storageTypes_ = java.util.Collections.emptyList();
storageIDs_ = com.google.protobuf.LazyStringArrayList.EMPTY;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasB()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasOffset()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasCorrupt()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasBlockToken()) {
memoizedIsInitialized = 0;
return false;
}
if (!getB().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getLocsCount(); i++) {
if (!getLocs(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (!getBlockToken().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, b_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, offset_);
}
for (int i = 0; i < locs_.size(); i++) {
output.writeMessage(3, locs_.get(i));
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBool(4, corrupt_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeMessage(5, blockToken_);
}
if (getIsCachedList().size() > 0) {
output.writeRawVarint32(50);
output.writeRawVarint32(isCachedMemoizedSerializedSize);
}
for (int i = 0; i < isCached_.size(); i++) {
output.writeBoolNoTag(isCached_.get(i));
}
for (int i = 0; i < storageTypes_.size(); i++) {
output.writeEnum(7, storageTypes_.get(i).getNumber());
}
for (int i = 0; i < storageIDs_.size(); i++) {
output.writeBytes(8, storageIDs_.getByteString(i));
}
getUnknownFields().writeTo(output);
}
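// Because isCached is declared [packed = true], writeTo emits field 6 as one
// length-delimited record: a single tag byte (50 = 6<<3|2), a varint byte count
// (isCachedMemoizedSerializedSize, populated by the getSerializedSize() call at
// the top of this method), then the raw bools with no per-element tags. The
// unpacked repeated fields 7 and 8 repeat their tag for every element instead.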
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, b_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, offset_);
}
for (int i = 0; i < locs_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, locs_.get(i));
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(4, corrupt_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(5, blockToken_);
}
{
int dataSize = 0;
dataSize = 1 * getIsCachedList().size();
size += dataSize;
if (!getIsCachedList().isEmpty()) {
size += 1;
size += com.google.protobuf.CodedOutputStream
.computeInt32SizeNoTag(dataSize);
}
isCachedMemoizedSerializedSize = dataSize;
}
{
int dataSize = 0;
for (int i = 0; i < storageTypes_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeEnumSizeNoTag(storageTypes_.get(i).getNumber());
}
size += dataSize;
size += 1 * storageTypes_.size();
}
{
int dataSize = 0;
for (int i = 0; i < storageIDs_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeBytesSizeNoTag(storageIDs_.getByteString(i));
}
size += dataSize;
size += 1 * getStorageIDsList().size();
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
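// Size accounting mirrors the wire format: the packed isCached payload is
// exactly one byte per bool (dataSize = 1 * count), plus a tag byte and a
// varint length prefix when non-empty. The totals are memoized in
// memoizedSerializedSize and isCachedMemoizedSerializedSize, which is safe
// because messages are immutable once built.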
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto) obj;
boolean result = true;
result = result && (hasB() == other.hasB());
if (hasB()) {
result = result && getB()
.equals(other.getB());
}
result = result && (hasOffset() == other.hasOffset());
if (hasOffset()) {
result = result && (getOffset()
== other.getOffset());
}
result = result && getLocsList()
.equals(other.getLocsList());
result = result && (hasCorrupt() == other.hasCorrupt());
if (hasCorrupt()) {
result = result && (getCorrupt()
== other.getCorrupt());
}
result = result && (hasBlockToken() == other.hasBlockToken());
if (hasBlockToken()) {
result = result && getBlockToken()
.equals(other.getBlockToken());
}
result = result && getIsCachedList()
.equals(other.getIsCachedList());
result = result && getStorageTypesList()
.equals(other.getStorageTypesList());
result = result && getStorageIDsList()
.equals(other.getStorageIDsList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasB()) {
hash = (37 * hash) + B_FIELD_NUMBER;
hash = (53 * hash) + getB().hashCode();
}
if (hasOffset()) {
hash = (37 * hash) + OFFSET_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getOffset());
}
if (getLocsCount() > 0) {
hash = (37 * hash) + LOCS_FIELD_NUMBER;
hash = (53 * hash) + getLocsList().hashCode();
}
if (hasCorrupt()) {
hash = (37 * hash) + CORRUPT_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getCorrupt());
}
if (hasBlockToken()) {
hash = (37 * hash) + BLOCKTOKEN_FIELD_NUMBER;
hash = (53 * hash) + getBlockToken().hashCode();
}
if (getIsCachedCount() > 0) {
hash = (37 * hash) + ISCACHED_FIELD_NUMBER;
hash = (53 * hash) + getIsCachedList().hashCode();
}
if (getStorageTypesCount() > 0) {
hash = (37 * hash) + STORAGETYPES_FIELD_NUMBER;
hash = (53 * hash) + hashEnumList(getStorageTypesList());
}
if (getStorageIDsCount() > 0) {
hash = (37 * hash) + STORAGEIDS_FIELD_NUMBER;
hash = (53 * hash) + getStorageIDsList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
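// Sketch (not generated code): the newBuilder(prototype)/toBuilder() plumbing
// above supports the usual copy-and-modify idiom, e.g. flagging a block corrupt:
//
//   LocatedBlockProto flagged = original.toBuilder()
//       .setCorrupt(true)
//       .build();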
/**
* Protobuf type {@code hadoop.hdfs.LocatedBlockProto}
*
* <pre>
* A LocatedBlock gives information about a block and its location.
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getBFieldBuilder();
getLocsFieldBuilder();
getBlockTokenFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (bBuilder_ == null) {
b_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
} else {
bBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
offset_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
if (locsBuilder_ == null) {
locs_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
} else {
locsBuilder_.clear();
}
corrupt_ = false;
bitField0_ = (bitField0_ & ~0x00000008);
if (blockTokenBuilder_ == null) {
blockToken_ = org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance();
} else {
blockTokenBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000010);
isCached_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000020);
storageTypes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000040);
storageIDs_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000080);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (bBuilder_ == null) {
result.b_ = b_;
} else {
result.b_ = bBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.offset_ = offset_;
if (locsBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004)) {
locs_ = java.util.Collections.unmodifiableList(locs_);
bitField0_ = (bitField0_ & ~0x00000004);
}
result.locs_ = locs_;
} else {
result.locs_ = locsBuilder_.build();
}
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000004;
}
result.corrupt_ = corrupt_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000008;
}
if (blockTokenBuilder_ == null) {
result.blockToken_ = blockToken_;
} else {
result.blockToken_ = blockTokenBuilder_.build();
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
isCached_ = java.util.Collections.unmodifiableList(isCached_);
bitField0_ = (bitField0_ & ~0x00000020);
}
result.isCached_ = isCached_;
if (((bitField0_ & 0x00000040) == 0x00000040)) {
storageTypes_ = java.util.Collections.unmodifiableList(storageTypes_);
bitField0_ = (bitField0_ & ~0x00000040);
}
result.storageTypes_ = storageTypes_;
if (((bitField0_ & 0x00000080) == 0x00000080)) {
storageIDs_ = new com.google.protobuf.UnmodifiableLazyStringList(
storageIDs_);
bitField0_ = (bitField0_ & ~0x00000080);
}
result.storageIDs_ = storageIDs_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()) return this;
if (other.hasB()) {
mergeB(other.getB());
}
if (other.hasOffset()) {
setOffset(other.getOffset());
}
if (locsBuilder_ == null) {
if (!other.locs_.isEmpty()) {
if (locs_.isEmpty()) {
locs_ = other.locs_;
bitField0_ = (bitField0_ & ~0x00000004);
} else {
ensureLocsIsMutable();
locs_.addAll(other.locs_);
}
onChanged();
}
} else {
if (!other.locs_.isEmpty()) {
if (locsBuilder_.isEmpty()) {
locsBuilder_.dispose();
locsBuilder_ = null;
locs_ = other.locs_;
bitField0_ = (bitField0_ & ~0x00000004);
locsBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getLocsFieldBuilder() : null;
} else {
locsBuilder_.addAllMessages(other.locs_);
}
}
}
if (other.hasCorrupt()) {
setCorrupt(other.getCorrupt());
}
if (other.hasBlockToken()) {
mergeBlockToken(other.getBlockToken());
}
if (!other.isCached_.isEmpty()) {
if (isCached_.isEmpty()) {
isCached_ = other.isCached_;
bitField0_ = (bitField0_ & ~0x00000020);
} else {
ensureIsCachedIsMutable();
isCached_.addAll(other.isCached_);
}
onChanged();
}
if (!other.storageTypes_.isEmpty()) {
if (storageTypes_.isEmpty()) {
storageTypes_ = other.storageTypes_;
bitField0_ = (bitField0_ & ~0x00000040);
} else {
ensureStorageTypesIsMutable();
storageTypes_.addAll(other.storageTypes_);
}
onChanged();
}
if (!other.storageIDs_.isEmpty()) {
if (storageIDs_.isEmpty()) {
storageIDs_ = other.storageIDs_;
bitField0_ = (bitField0_ & ~0x00000080);
} else {
ensureStorageIDsIsMutable();
storageIDs_.addAll(other.storageIDs_);
}
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasB()) {
return false;
}
if (!hasOffset()) {
return false;
}
if (!hasCorrupt()) {
return false;
}
if (!hasBlockToken()) {
return false;
}
if (!getB().isInitialized()) {
return false;
}
for (int i = 0; i < getLocsCount(); i++) {
if (!getLocs(i).isInitialized()) {
return false;
}
}
if (!getBlockToken().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.ExtendedBlockProto b = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto b_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> bBuilder_;
/**
* required .hadoop.hdfs.ExtendedBlockProto b = 1;
*/
public boolean hasB() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ExtendedBlockProto b = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB() {
if (bBuilder_ == null) {
return b_;
} else {
return bBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ExtendedBlockProto b = 1;
*/
public Builder setB(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
if (bBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
b_ = value;
onChanged();
} else {
bBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto b = 1;
*/
public Builder setB(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) {
if (bBuilder_ == null) {
b_ = builderForValue.build();
onChanged();
} else {
bBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto b = 1;
*/
public Builder mergeB(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
if (bBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
b_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) {
b_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(b_).mergeFrom(value).buildPartial();
} else {
b_ = value;
}
onChanged();
} else {
bBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto b = 1;
*/
public Builder clearB() {
if (bBuilder_ == null) {
b_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
onChanged();
} else {
bBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto b = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getBFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ExtendedBlockProto b = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder() {
if (bBuilder_ != null) {
return bBuilder_.getMessageOrBuilder();
} else {
return b_;
}
}
/**
* required .hadoop.hdfs.ExtendedBlockProto b = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>
getBFieldBuilder() {
if (bBuilder_ == null) {
bBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>(
b_,
getParentForChildren(),
isClean());
b_ = null;
}
return bBuilder_;
}
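// Design note: bBuilder_ starts null, and getBFieldBuilder() creates the
// SingleFieldBuilder lazily, seeding it with the current b_ and nulling the
// plain field. From then on the builder view is authoritative; hasB(), getB()
// and setB() all branch on whether bBuilder_ exists, so message-valued writes
// (setB(value)) and nested-builder writes (getBBuilder().mergeFrom(...)) can
// be mixed without the two representations drifting apart.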
// required uint64 offset = 2;
private long offset_ ;
/**
* required uint64 offset = 2;
*
*
* offset of first byte of block in the file
*
*/
public boolean hasOffset() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 offset = 2;
*
*
* offset of first byte of block in the file
*
*/
public long getOffset() {
return offset_;
}
/**
* required uint64 offset = 2;
*
*
* offset of first byte of block in the file
*
*/
public Builder setOffset(long value) {
bitField0_ |= 0x00000002;
offset_ = value;
onChanged();
return this;
}
/**
* required uint64 offset = 2;
*
*
* offset of first byte of block in the file
*
*/
public Builder clearOffset() {
bitField0_ = (bitField0_ & ~0x00000002);
offset_ = 0L;
onChanged();
return this;
}
// repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> locs_ =
java.util.Collections.emptyList();
private void ensureLocsIsMutable() {
if (!((bitField0_ & 0x00000004) == 0x00000004)) {
locs_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>(locs_);
bitField0_ |= 0x00000004;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> locsBuilder_;
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getLocsList() {
if (locsBuilder_ == null) {
return java.util.Collections.unmodifiableList(locs_);
} else {
return locsBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public int getLocsCount() {
if (locsBuilder_ == null) {
return locs_.size();
} else {
return locsBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getLocs(int index) {
if (locsBuilder_ == null) {
return locs_.get(index);
} else {
return locsBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public Builder setLocs(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (locsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureLocsIsMutable();
locs_.set(index, value);
onChanged();
} else {
locsBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public Builder setLocs(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (locsBuilder_ == null) {
ensureLocsIsMutable();
locs_.set(index, builderForValue.build());
onChanged();
} else {
locsBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public Builder addLocs(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (locsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureLocsIsMutable();
locs_.add(value);
onChanged();
} else {
locsBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public Builder addLocs(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (locsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureLocsIsMutable();
locs_.add(index, value);
onChanged();
} else {
locsBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public Builder addLocs(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (locsBuilder_ == null) {
ensureLocsIsMutable();
locs_.add(builderForValue.build());
onChanged();
} else {
locsBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public Builder addLocs(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (locsBuilder_ == null) {
ensureLocsIsMutable();
locs_.add(index, builderForValue.build());
onChanged();
} else {
locsBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public Builder addAllLocs(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> values) {
if (locsBuilder_ == null) {
ensureLocsIsMutable();
super.addAll(values, locs_);
onChanged();
} else {
locsBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public Builder clearLocs() {
if (locsBuilder_ == null) {
locs_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
} else {
locsBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public Builder removeLocs(int index) {
if (locsBuilder_ == null) {
ensureLocsIsMutable();
locs_.remove(index);
onChanged();
} else {
locsBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getLocsBuilder(
int index) {
return getLocsFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getLocsOrBuilder(
int index) {
if (locsBuilder_ == null) {
return locs_.get(index); } else {
return locsBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getLocsOrBuilderList() {
if (locsBuilder_ != null) {
return locsBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(locs_);
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addLocsBuilder() {
return getLocsFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addLocsBuilder(
int index) {
return getLocsFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder>
getLocsBuilderList() {
return getLocsFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getLocsFieldBuilder() {
if (locsBuilder_ == null) {
locsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>(
locs_,
((bitField0_ & 0x00000004) == 0x00000004),
getParentForChildren(),
isClean());
locs_ = null;
}
return locsBuilder_;
}
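// The repeated locs field uses the same lazy scheme with a RepeatedFieldBuilder:
// until getLocsFieldBuilder() is first called, elements live in the plain locs_
// list, kept copy-on-write by ensureLocsIsMutable() and tracked via bit
// 0x00000004 of bitField0_; afterwards locsBuilder_ owns the list and every
// accessor above delegates to it.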
// required bool corrupt = 4;
private boolean corrupt_ ;
/**
* required bool corrupt = 4;
*
*
* true if all replicas of a block are corrupt, else false
*
*/
public boolean hasCorrupt() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required bool corrupt = 4;
*
*
* true if all replicas of a block are corrupt, else false
*
*/
public boolean getCorrupt() {
return corrupt_;
}
/**
* required bool corrupt = 4;
*
*
* true if all replicas of a block are corrupt, else false
*
*/
public Builder setCorrupt(boolean value) {
bitField0_ |= 0x00000008;
corrupt_ = value;
onChanged();
return this;
}
/**
* required bool corrupt = 4;
*
*
* true if all replicas of a block are corrupt, else false
*
*/
public Builder clearCorrupt() {
bitField0_ = (bitField0_ & ~0x00000008);
corrupt_ = false;
onChanged();
return this;
}
// required .hadoop.common.TokenProto blockToken = 5;
private org.apache.hadoop.security.proto.SecurityProtos.TokenProto blockToken_ = org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> blockTokenBuilder_;
/**
* required .hadoop.common.TokenProto blockToken = 5;
*/
public boolean hasBlockToken() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required .hadoop.common.TokenProto blockToken = 5;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockToken() {
if (blockTokenBuilder_ == null) {
return blockToken_;
} else {
return blockTokenBuilder_.getMessage();
}
}
/**
* required .hadoop.common.TokenProto blockToken = 5;
*/
public Builder setBlockToken(org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
if (blockTokenBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
blockToken_ = value;
onChanged();
} else {
blockTokenBuilder_.setMessage(value);
}
bitField0_ |= 0x00000010;
return this;
}
/**
* required .hadoop.common.TokenProto blockToken = 5;
*/
public Builder setBlockToken(
org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) {
if (blockTokenBuilder_ == null) {
blockToken_ = builderForValue.build();
onChanged();
} else {
blockTokenBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000010;
return this;
}
/**
* required .hadoop.common.TokenProto blockToken = 5;
*/
public Builder mergeBlockToken(org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
if (blockTokenBuilder_ == null) {
if (((bitField0_ & 0x00000010) == 0x00000010) &&
blockToken_ != org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance()) {
blockToken_ =
org.apache.hadoop.security.proto.SecurityProtos.TokenProto.newBuilder(blockToken_).mergeFrom(value).buildPartial();
} else {
blockToken_ = value;
}
onChanged();
} else {
blockTokenBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000010;
return this;
}
/**
* required .hadoop.common.TokenProto blockToken = 5;
*/
public Builder clearBlockToken() {
if (blockTokenBuilder_ == null) {
blockToken_ = org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance();
onChanged();
} else {
blockTokenBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
/**
* required .hadoop.common.TokenProto blockToken = 5;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder getBlockTokenBuilder() {
bitField0_ |= 0x00000010;
onChanged();
return getBlockTokenFieldBuilder().getBuilder();
}
/**
* required .hadoop.common.TokenProto blockToken = 5;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokenOrBuilder() {
if (blockTokenBuilder_ != null) {
return blockTokenBuilder_.getMessageOrBuilder();
} else {
return blockToken_;
}
}
/**
* required .hadoop.common.TokenProto blockToken = 5;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>
getBlockTokenFieldBuilder() {
if (blockTokenBuilder_ == null) {
blockTokenBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>(
blockToken_,
getParentForChildren(),
isClean());
blockToken_ = null;
}
return blockTokenBuilder_;
}
// repeated bool isCached = 6 [packed = true];
private java.util.List<java.lang.Boolean> isCached_ = java.util.Collections.emptyList();
private void ensureIsCachedIsMutable() {
if (!((bitField0_ & 0x00000020) == 0x00000020)) {
isCached_ = new java.util.ArrayList<java.lang.Boolean>(isCached_);
bitField0_ |= 0x00000020;
}
}
/**
* repeated bool isCached = 6 [packed = true];
*
*
* if a location in locs is cached
*
*/
public java.util.List<java.lang.Boolean>
getIsCachedList() {
return java.util.Collections.unmodifiableList(isCached_);
}
/**
* repeated bool isCached = 6 [packed = true];
*
*
* if a location in locs is cached
*
*/
public int getIsCachedCount() {
return isCached_.size();
}
/**
* repeated bool isCached = 6 [packed = true];
*
*
* if a location in locs is cached
*
*/
public boolean getIsCached(int index) {
return isCached_.get(index);
}
/**
* repeated bool isCached = 6 [packed = true];
*
*
* if a location in locs is cached
*
*/
public Builder setIsCached(
int index, boolean value) {
ensureIsCachedIsMutable();
isCached_.set(index, value);
onChanged();
return this;
}
/**
* repeated bool isCached = 6 [packed = true];
*
*
* if a location in locs is cached
*
*/
public Builder addIsCached(boolean value) {
ensureIsCachedIsMutable();
isCached_.add(value);
onChanged();
return this;
}
/**
* repeated bool isCached = 6 [packed = true];
*
*
* if a location in locs is cached
*
*/
public Builder addAllIsCached(
java.lang.Iterable<? extends java.lang.Boolean> values) {
ensureIsCachedIsMutable();
super.addAll(values, isCached_);
onChanged();
return this;
}
/**
* repeated bool isCached = 6 [packed = true];
*
*
* if a location in locs is cached
*
*/
public Builder clearIsCached() {
isCached_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000020);
onChanged();
return this;
}
// repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> storageTypes_ =
java.util.Collections.emptyList();
private void ensureStorageTypesIsMutable() {
if (!((bitField0_ & 0x00000040) == 0x00000040)) {
storageTypes_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(storageTypes_);
bitField0_ |= 0x00000040;
}
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getStorageTypesList() {
return java.util.Collections.unmodifiableList(storageTypes_);
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
*/
public int getStorageTypesCount() {
return storageTypes_.size();
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) {
return storageTypes_.get(index);
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
*/
public Builder setStorageTypes(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
ensureStorageTypesIsMutable();
storageTypes_.set(index, value);
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
*/
public Builder addStorageTypes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
ensureStorageTypesIsMutable();
storageTypes_.add(value);
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
*/
public Builder addAllStorageTypes(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> values) {
ensureStorageTypesIsMutable();
super.addAll(values, storageTypes_);
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
*/
public Builder clearStorageTypes() {
storageTypes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000040);
onChanged();
return this;
}
// repeated string storageIDs = 8;
private com.google.protobuf.LazyStringList storageIDs_ = com.google.protobuf.LazyStringArrayList.EMPTY;
private void ensureStorageIDsIsMutable() {
if (!((bitField0_ & 0x00000080) == 0x00000080)) {
storageIDs_ = new com.google.protobuf.LazyStringArrayList(storageIDs_);
bitField0_ |= 0x00000080;
}
}
/**
* repeated string storageIDs = 8;
*/
public java.util.List<java.lang.String>
getStorageIDsList() {
return java.util.Collections.unmodifiableList(storageIDs_);
}
/**
* repeated string storageIDs = 8;
*/
public int getStorageIDsCount() {
return storageIDs_.size();
}
/**
* repeated string storageIDs = 8;
*/
public java.lang.String getStorageIDs(int index) {
return storageIDs_.get(index);
}
/**
* repeated string storageIDs = 8;
*/
public com.google.protobuf.ByteString
getStorageIDsBytes(int index) {
return storageIDs_.getByteString(index);
}
/**
* repeated string storageIDs = 8;
*/
public Builder setStorageIDs(
int index, java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureStorageIDsIsMutable();
storageIDs_.set(index, value);
onChanged();
return this;
}
/**
* repeated string storageIDs = 8;
*/
public Builder addStorageIDs(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureStorageIDsIsMutable();
storageIDs_.add(value);
onChanged();
return this;
}
/**
* repeated string storageIDs = 8;
*/
public Builder addAllStorageIDs(
java.lang.Iterable<java.lang.String> values) {
ensureStorageIDsIsMutable();
super.addAll(values, storageIDs_);
onChanged();
return this;
}
/**
* repeated string storageIDs = 8;
*/
public Builder clearStorageIDs() {
storageIDs_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000080);
onChanged();
return this;
}
/**
* repeated string storageIDs = 8;
*/
public Builder addStorageIDsBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
ensureStorageIDsIsMutable();
storageIDs_.add(value);
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.LocatedBlockProto)
}
static {
defaultInstance = new LocatedBlockProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.LocatedBlockProto)
}
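/*
 * Illustration only, not part of the generated source: a minimal sketch of
 * the repeated-field builder pattern for LocatedBlockProto fields 7 and 8
 * above. It assumes the caller also sets the message's other required
 * fields (not shown in this excerpt) before calling build().
 *
 *   HdfsProtos.LocatedBlockProto.Builder b = HdfsProtos.LocatedBlockProto.newBuilder();
 *   b.addStorageTypes(HdfsProtos.StorageTypeProto.DISK);   // one entry per replica
 *   b.addStorageTypes(HdfsProtos.StorageTypeProto.SSD);
 *   b.addAllStorageIDs(java.util.Arrays.asList("storage-1", "storage-2"));
 *   java.util.List<String> ids = b.getStorageIDsList();    // unmodifiable view
 */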
public interface DataEncryptionKeyProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required uint32 keyId = 1;
/**
* required uint32 keyId = 1;
*/
boolean hasKeyId();
/**
* required uint32 keyId = 1;
*/
int getKeyId();
// required string blockPoolId = 2;
/**
* required string blockPoolId = 2;
*/
boolean hasBlockPoolId();
/**
* required string blockPoolId = 2;
*/
java.lang.String getBlockPoolId();
/**
* required string blockPoolId = 2;
*/
com.google.protobuf.ByteString
getBlockPoolIdBytes();
// required bytes nonce = 3;
/**
* required bytes nonce = 3;
*/
boolean hasNonce();
/**
* required bytes nonce = 3;
*/
com.google.protobuf.ByteString getNonce();
// required bytes encryptionKey = 4;
/**
* required bytes encryptionKey = 4;
*/
boolean hasEncryptionKey();
/**
* required bytes encryptionKey = 4;
*/
com.google.protobuf.ByteString getEncryptionKey();
// required uint64 expiryDate = 5;
/**
* required uint64 expiryDate = 5;
*/
boolean hasExpiryDate();
/**
* required uint64 expiryDate = 5;
*/
long getExpiryDate();
// optional string encryptionAlgorithm = 6;
/**
* optional string encryptionAlgorithm = 6;
*/
boolean hasEncryptionAlgorithm();
/**
* optional string encryptionAlgorithm = 6;
*/
java.lang.String getEncryptionAlgorithm();
/**
* optional string encryptionAlgorithm = 6;
*/
com.google.protobuf.ByteString
getEncryptionAlgorithmBytes();
}
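/*
 * Every field in the interface above follows the proto2 hasX()/getX()
 * convention: hasX() reports whether the field was explicitly set, while
 * getX() returns either the set value or the field's default. A hedged
 * sketch, assuming an already-built message `key`; the fallback string is a
 * hypothetical caller-side choice, not something this file defines:
 *
 *   String algo = key.hasEncryptionAlgorithm()
 *       ? key.getEncryptionAlgorithm()    // optional field 6 was set
 *       : "AES/CTR/NoPadding";            // hypothetical fallback
 */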
/**
* Protobuf type {@code hadoop.hdfs.DataEncryptionKeyProto}
*/
public static final class DataEncryptionKeyProto extends
com.google.protobuf.GeneratedMessage
implements DataEncryptionKeyProtoOrBuilder {
// Use DataEncryptionKeyProto.newBuilder() to construct.
private DataEncryptionKeyProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private DataEncryptionKeyProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final DataEncryptionKeyProto defaultInstance;
public static DataEncryptionKeyProto getDefaultInstance() {
return defaultInstance;
}
public DataEncryptionKeyProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DataEncryptionKeyProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
keyId_ = input.readUInt32();
break;
}
case 18: {
bitField0_ |= 0x00000002;
blockPoolId_ = input.readBytes();
break;
}
case 26: {
bitField0_ |= 0x00000004;
nonce_ = input.readBytes();
break;
}
case 34: {
bitField0_ |= 0x00000008;
encryptionKey_ = input.readBytes();
break;
}
case 40: {
bitField0_ |= 0x00000010;
expiryDate_ = input.readUInt64();
break;
}
case 50: {
bitField0_ |= 0x00000020;
encryptionAlgorithm_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
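/*
 * The case labels in the parsing loop above are raw wire tags, computed as
 * tag = (fieldNumber << 3) | wireType. For example, case 8 is field 1 as a
 * varint (1 << 3 | 0), case 18 is field 2 length-delimited (2 << 3 | 2), and
 * case 40 is field 5 as a varint (5 << 3 | 0), which is why the string and
 * bytes fields all land on tags with wire type 2.
 */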
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.Builder.class);
}
public static com.google.protobuf.Parser<DataEncryptionKeyProto> PARSER =
new com.google.protobuf.AbstractParser<DataEncryptionKeyProto>() {
public DataEncryptionKeyProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new DataEncryptionKeyProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<DataEncryptionKeyProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required uint32 keyId = 1;
public static final int KEYID_FIELD_NUMBER = 1;
private int keyId_;
/**
* required uint32 keyId = 1;
*/
public boolean hasKeyId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint32 keyId = 1;
*/
public int getKeyId() {
return keyId_;
}
// required string blockPoolId = 2;
public static final int BLOCKPOOLID_FIELD_NUMBER = 2;
private java.lang.Object blockPoolId_;
/**
* required string blockPoolId = 2;
*/
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string blockPoolId = 2;
*/
public java.lang.String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
blockPoolId_ = s;
}
return s;
}
}
/**
* required string blockPoolId = 2;
*/
public com.google.protobuf.ByteString
getBlockPoolIdBytes() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPoolId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required bytes nonce = 3;
public static final int NONCE_FIELD_NUMBER = 3;
private com.google.protobuf.ByteString nonce_;
/**
* required bytes nonce = 3;
*/
public boolean hasNonce() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required bytes nonce = 3;
*/
public com.google.protobuf.ByteString getNonce() {
return nonce_;
}
// required bytes encryptionKey = 4;
public static final int ENCRYPTIONKEY_FIELD_NUMBER = 4;
private com.google.protobuf.ByteString encryptionKey_;
/**
* required bytes encryptionKey = 4;
*/
public boolean hasEncryptionKey() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required bytes encryptionKey = 4;
*/
public com.google.protobuf.ByteString getEncryptionKey() {
return encryptionKey_;
}
// required uint64 expiryDate = 5;
public static final int EXPIRYDATE_FIELD_NUMBER = 5;
private long expiryDate_;
/**
* required uint64 expiryDate = 5;
*/
public boolean hasExpiryDate() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required uint64 expiryDate = 5;
*/
public long getExpiryDate() {
return expiryDate_;
}
// optional string encryptionAlgorithm = 6;
public static final int ENCRYPTIONALGORITHM_FIELD_NUMBER = 6;
private java.lang.Object encryptionAlgorithm_;
/**
* optional string encryptionAlgorithm = 6;
*/
public boolean hasEncryptionAlgorithm() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional string encryptionAlgorithm = 6;
*/
public java.lang.String getEncryptionAlgorithm() {
java.lang.Object ref = encryptionAlgorithm_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
encryptionAlgorithm_ = s;
}
return s;
}
}
/**
* optional string encryptionAlgorithm = 6;
*/
public com.google.protobuf.ByteString
getEncryptionAlgorithmBytes() {
java.lang.Object ref = encryptionAlgorithm_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
encryptionAlgorithm_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
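/*
 * Note on the string accessors above: a string field is held as a
 * java.lang.Object that starts life as the UTF-8 ByteString read off the
 * wire. The first getBlockPoolId()/getEncryptionAlgorithm() call decodes it
 * and, if it is valid UTF-8, caches the String; the corresponding *Bytes()
 * accessors do the reverse and cache the encoded ByteString, so each
 * conversion is paid at most once per direction.
 */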
private void initFields() {
keyId_ = 0;
blockPoolId_ = "";
nonce_ = com.google.protobuf.ByteString.EMPTY;
encryptionKey_ = com.google.protobuf.ByteString.EMPTY;
expiryDate_ = 0L;
encryptionAlgorithm_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasKeyId()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasBlockPoolId()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasNonce()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasEncryptionKey()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasExpiryDate()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt32(1, keyId_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getBlockPoolIdBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, nonce_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeBytes(4, encryptionKey_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeUInt64(5, expiryDate_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeBytes(6, getEncryptionAlgorithmBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(1, keyId_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getBlockPoolIdBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, nonce_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(4, encryptionKey_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(5, expiryDate_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(6, getEncryptionAlgorithmBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
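/*
 * writeTo() emits only the fields whose presence bits are set, and
 * getSerializedSize() memoizes its result in memoizedSerializedSize, so a
 * message is measured once no matter how often it is written. A hedged
 * roundtrip sketch using the standard protobuf Message helpers:
 *
 *   byte[] wire = msg.toByteArray();   // drives getSerializedSize()/writeTo()
 *   HdfsProtos.DataEncryptionKeyProto back =
 *       HdfsProtos.DataEncryptionKeyProto.parseFrom(wire);
 *   assert back.equals(msg);
 */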
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto) obj;
boolean result = true;
result = result && (hasKeyId() == other.hasKeyId());
if (hasKeyId()) {
result = result && (getKeyId()
== other.getKeyId());
}
result = result && (hasBlockPoolId() == other.hasBlockPoolId());
if (hasBlockPoolId()) {
result = result && getBlockPoolId()
.equals(other.getBlockPoolId());
}
result = result && (hasNonce() == other.hasNonce());
if (hasNonce()) {
result = result && getNonce()
.equals(other.getNonce());
}
result = result && (hasEncryptionKey() == other.hasEncryptionKey());
if (hasEncryptionKey()) {
result = result && getEncryptionKey()
.equals(other.getEncryptionKey());
}
result = result && (hasExpiryDate() == other.hasExpiryDate());
if (hasExpiryDate()) {
result = result && (getExpiryDate()
== other.getExpiryDate());
}
result = result && (hasEncryptionAlgorithm() == other.hasEncryptionAlgorithm());
if (hasEncryptionAlgorithm()) {
result = result && getEncryptionAlgorithm()
.equals(other.getEncryptionAlgorithm());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasKeyId()) {
hash = (37 * hash) + KEYID_FIELD_NUMBER;
hash = (53 * hash) + getKeyId();
}
if (hasBlockPoolId()) {
hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
hash = (53 * hash) + getBlockPoolId().hashCode();
}
if (hasNonce()) {
hash = (37 * hash) + NONCE_FIELD_NUMBER;
hash = (53 * hash) + getNonce().hashCode();
}
if (hasEncryptionKey()) {
hash = (37 * hash) + ENCRYPTIONKEY_FIELD_NUMBER;
hash = (53 * hash) + getEncryptionKey().hashCode();
}
if (hasExpiryDate()) {
hash = (37 * hash) + EXPIRYDATE_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getExpiryDate());
}
if (hasEncryptionAlgorithm()) {
hash = (37 * hash) + ENCRYPTIONALGORITHM_FIELD_NUMBER;
hash = (53 * hash) + getEncryptionAlgorithm().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
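/*
 * The parseDelimitedFrom overloads above pair with the standard
 * MessageLite.writeDelimitedTo, which prefixes each message with its varint
 * length so several messages can share one stream. A sketch, assuming `out`
 * and `in` are a connected OutputStream/InputStream pair:
 *
 *   key.writeDelimitedTo(out);         // length-prefixed frame
 *   HdfsProtos.DataEncryptionKeyProto next =
 *       HdfsProtos.DataEncryptionKeyProto.parseDelimitedFrom(in);
 */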
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.DataEncryptionKeyProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
keyId_ = 0;
bitField0_ = (bitField0_ & ~0x00000001);
blockPoolId_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
nonce_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000004);
encryptionKey_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000008);
expiryDate_ = 0L;
bitField0_ = (bitField0_ & ~0x00000010);
encryptionAlgorithm_ = "";
bitField0_ = (bitField0_ & ~0x00000020);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.keyId_ = keyId_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.blockPoolId_ = blockPoolId_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.nonce_ = nonce_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.encryptionKey_ = encryptionKey_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.expiryDate_ = expiryDate_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000020;
}
result.encryptionAlgorithm_ = encryptionAlgorithm_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.getDefaultInstance()) return this;
if (other.hasKeyId()) {
setKeyId(other.getKeyId());
}
if (other.hasBlockPoolId()) {
bitField0_ |= 0x00000002;
blockPoolId_ = other.blockPoolId_;
onChanged();
}
if (other.hasNonce()) {
setNonce(other.getNonce());
}
if (other.hasEncryptionKey()) {
setEncryptionKey(other.getEncryptionKey());
}
if (other.hasExpiryDate()) {
setExpiryDate(other.getExpiryDate());
}
if (other.hasEncryptionAlgorithm()) {
bitField0_ |= 0x00000020;
encryptionAlgorithm_ = other.encryptionAlgorithm_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasKeyId()) {
return false;
}
if (!hasBlockPoolId()) {
return false;
}
if (!hasNonce()) {
return false;
}
if (!hasEncryptionKey()) {
return false;
}
if (!hasExpiryDate()) {
return false;
}
return true;
}
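/*
 * The builder's isInitialized() above mirrors the required-field checks in
 * build(): build() throws an UninitializedMessageException while any of
 * keyId, blockPoolId, nonce, encryptionKey or expiryDate is missing, whereas
 * buildPartial() hands back the incomplete message. Sketch:
 *
 *   HdfsProtos.DataEncryptionKeyProto.Builder b =
 *       HdfsProtos.DataEncryptionKeyProto.newBuilder().setKeyId(1);
 *   b.buildPartial();   // ok: partial message, isInitialized() == false
 *   b.build();          // throws: four required fields are still unset
 */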
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required uint32 keyId = 1;
private int keyId_ ;
/**
* required uint32 keyId = 1;
*/
public boolean hasKeyId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint32 keyId = 1;
*/
public int getKeyId() {
return keyId_;
}
/**
* required uint32 keyId = 1;
*/
public Builder setKeyId(int value) {
bitField0_ |= 0x00000001;
keyId_ = value;
onChanged();
return this;
}
/**
* required uint32 keyId = 1;
*/
public Builder clearKeyId() {
bitField0_ = (bitField0_ & ~0x00000001);
keyId_ = 0;
onChanged();
return this;
}
// required string blockPoolId = 2;
private java.lang.Object blockPoolId_ = "";
/**
* required string blockPoolId = 2;
*/
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string blockPoolId = 2;
*/
public java.lang.String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
blockPoolId_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string blockPoolId = 2;
*/
public com.google.protobuf.ByteString
getBlockPoolIdBytes() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPoolId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string blockPoolId = 2;
*/
public Builder setBlockPoolId(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
blockPoolId_ = value;
onChanged();
return this;
}
/**
* required string blockPoolId = 2;
*/
public Builder clearBlockPoolId() {
bitField0_ = (bitField0_ & ~0x00000002);
blockPoolId_ = getDefaultInstance().getBlockPoolId();
onChanged();
return this;
}
/**
* required string blockPoolId = 2;
*/
public Builder setBlockPoolIdBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
blockPoolId_ = value;
onChanged();
return this;
}
// required bytes nonce = 3;
private com.google.protobuf.ByteString nonce_ = com.google.protobuf.ByteString.EMPTY;
/**
* required bytes nonce = 3;
*/
public boolean hasNonce() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required bytes nonce = 3;
*/
public com.google.protobuf.ByteString getNonce() {
return nonce_;
}
/**
* required bytes nonce = 3;
*/
public Builder setNonce(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
nonce_ = value;
onChanged();
return this;
}
/**
* required bytes nonce = 3;
*/
public Builder clearNonce() {
bitField0_ = (bitField0_ & ~0x00000004);
nonce_ = getDefaultInstance().getNonce();
onChanged();
return this;
}
// required bytes encryptionKey = 4;
private com.google.protobuf.ByteString encryptionKey_ = com.google.protobuf.ByteString.EMPTY;
/**
* required bytes encryptionKey = 4;
*/
public boolean hasEncryptionKey() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required bytes encryptionKey = 4;
*/
public com.google.protobuf.ByteString getEncryptionKey() {
return encryptionKey_;
}
/**
* required bytes encryptionKey = 4;
*/
public Builder setEncryptionKey(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
encryptionKey_ = value;
onChanged();
return this;
}
/**
* required bytes encryptionKey = 4;
*/
public Builder clearEncryptionKey() {
bitField0_ = (bitField0_ & ~0x00000008);
encryptionKey_ = getDefaultInstance().getEncryptionKey();
onChanged();
return this;
}
// required uint64 expiryDate = 5;
private long expiryDate_ ;
/**
* required uint64 expiryDate = 5;
*/
public boolean hasExpiryDate() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required uint64 expiryDate = 5;
*/
public long getExpiryDate() {
return expiryDate_;
}
/**
* required uint64 expiryDate = 5;
*/
public Builder setExpiryDate(long value) {
bitField0_ |= 0x00000010;
expiryDate_ = value;
onChanged();
return this;
}
/**
* required uint64 expiryDate = 5;
*/
public Builder clearExpiryDate() {
bitField0_ = (bitField0_ & ~0x00000010);
expiryDate_ = 0L;
onChanged();
return this;
}
// optional string encryptionAlgorithm = 6;
private java.lang.Object encryptionAlgorithm_ = "";
/**
* optional string encryptionAlgorithm = 6;
*/
public boolean hasEncryptionAlgorithm() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional string encryptionAlgorithm = 6;
*/
public java.lang.String getEncryptionAlgorithm() {
java.lang.Object ref = encryptionAlgorithm_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
encryptionAlgorithm_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string encryptionAlgorithm = 6;
*/
public com.google.protobuf.ByteString
getEncryptionAlgorithmBytes() {
java.lang.Object ref = encryptionAlgorithm_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
encryptionAlgorithm_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* optional string encryptionAlgorithm = 6;
*/
public Builder setEncryptionAlgorithm(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000020;
encryptionAlgorithm_ = value;
onChanged();
return this;
}
/**
* optional string encryptionAlgorithm = 6;
*/
public Builder clearEncryptionAlgorithm() {
bitField0_ = (bitField0_ & ~0x00000020);
encryptionAlgorithm_ = getDefaultInstance().getEncryptionAlgorithm();
onChanged();
return this;
}
/**
* optional string encryptionAlgorithm = 6;
*/
public Builder setEncryptionAlgorithmBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000020;
encryptionAlgorithm_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DataEncryptionKeyProto)
}
static {
defaultInstance = new DataEncryptionKeyProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DataEncryptionKeyProto)
}
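/*
 * Putting the accessors together: a hedged construction sketch for
 * DataEncryptionKeyProto. All field values below are illustrative
 * placeholders, including the block-pool id format and the use of a
 * millisecond timestamp for expiryDate.
 *
 *   HdfsProtos.DataEncryptionKeyProto key = HdfsProtos.DataEncryptionKeyProto.newBuilder()
 *       .setKeyId(42)                                                    // required uint32
 *       .setBlockPoolId("BP-1-127.0.0.1-0")                              // required string
 *       .setNonce(com.google.protobuf.ByteString.copyFrom(new byte[8]))
 *       .setEncryptionKey(com.google.protobuf.ByteString.copyFrom(new byte[16]))
 *       .setExpiryDate(System.currentTimeMillis() + 86400000L)           // required uint64
 *       .setEncryptionAlgorithm("AES/CTR/NoPadding")                     // optional string
 *       .build();
 */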
public interface FileEncryptionInfoProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.CipherSuiteProto suite = 1;
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
boolean hasSuite();
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite();
// required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
/**
* required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
*/
boolean hasCryptoProtocolVersion();
/**
* required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion();
// required bytes key = 3;
/**
* required bytes key = 3;
*/
boolean hasKey();
/**
* required bytes key = 3;
*/
com.google.protobuf.ByteString getKey();
// required bytes iv = 4;
/**
* required bytes iv = 4;
*/
boolean hasIv();
/**
* required bytes iv = 4;
*/
com.google.protobuf.ByteString getIv();
// required string keyName = 5;
/**
* required string keyName = 5;
*/
boolean hasKeyName();
/**
* required string keyName = 5;
*/
java.lang.String getKeyName();
/**
* required string keyName = 5;
*/
com.google.protobuf.ByteString
getKeyNameBytes();
// required string ezKeyVersionName = 6;
/**
* required string ezKeyVersionName = 6;
*/
boolean hasEzKeyVersionName();
/**
* required string ezKeyVersionName = 6;
*/
java.lang.String getEzKeyVersionName();
/**
* required string ezKeyVersionName = 6;
*/
com.google.protobuf.ByteString
getEzKeyVersionNameBytes();
}
/**
 * Protobuf type {@code hadoop.hdfs.FileEncryptionInfoProto}
 *
 * <pre>
 * Encryption information for a file.
 * </pre>
 */
public static final class FileEncryptionInfoProto extends
com.google.protobuf.GeneratedMessage
implements FileEncryptionInfoProtoOrBuilder {
// Use FileEncryptionInfoProto.newBuilder() to construct.
private FileEncryptionInfoProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private FileEncryptionInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final FileEncryptionInfoProto defaultInstance;
public static FileEncryptionInfoProto getDefaultInstance() {
return defaultInstance;
}
public FileEncryptionInfoProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private FileEncryptionInfoProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
suite_ = value;
}
break;
}
case 16: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(2, rawValue);
} else {
bitField0_ |= 0x00000002;
cryptoProtocolVersion_ = value;
}
break;
}
case 26: {
bitField0_ |= 0x00000004;
key_ = input.readBytes();
break;
}
case 34: {
bitField0_ |= 0x00000008;
iv_ = input.readBytes();
break;
}
case 42: {
bitField0_ |= 0x00000010;
keyName_ = input.readBytes();
break;
}
case 50: {
bitField0_ |= 0x00000020;
ezKeyVersionName_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
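/*
 * Note the enum cases (tags 8 and 16) in the constructor above: when the
 * wire carries an enum number this runtime does not recognize,
 * valueOf(rawValue) returns null and the raw varint is kept in
 * unknownFields via mergeVarintField rather than dropped, so re-serializing
 * the message preserves values written by a newer schema.
 */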
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder.class);
}
public static com.google.protobuf.Parser<FileEncryptionInfoProto> PARSER =
new com.google.protobuf.AbstractParser<FileEncryptionInfoProto>() {
public FileEncryptionInfoProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new FileEncryptionInfoProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<FileEncryptionInfoProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.CipherSuiteProto suite = 1;
public static final int SUITE_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto suite_;
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public boolean hasSuite() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() {
return suite_;
}
// required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
public static final int CRYPTOPROTOCOLVERSION_FIELD_NUMBER = 2;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto cryptoProtocolVersion_;
/**
* required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
*/
public boolean hasCryptoProtocolVersion() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion() {
return cryptoProtocolVersion_;
}
// required bytes key = 3;
public static final int KEY_FIELD_NUMBER = 3;
private com.google.protobuf.ByteString key_;
/**
* required bytes key = 3;
*/
public boolean hasKey() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required bytes key = 3;
*/
public com.google.protobuf.ByteString getKey() {
return key_;
}
// required bytes iv = 4;
public static final int IV_FIELD_NUMBER = 4;
private com.google.protobuf.ByteString iv_;
/**
* required bytes iv = 4;
*/
public boolean hasIv() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required bytes iv = 4;
*/
public com.google.protobuf.ByteString getIv() {
return iv_;
}
// required string keyName = 5;
public static final int KEYNAME_FIELD_NUMBER = 5;
private java.lang.Object keyName_;
/**
* required string keyName = 5;
*/
public boolean hasKeyName() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required string keyName = 5;
*/
public java.lang.String getKeyName() {
java.lang.Object ref = keyName_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
keyName_ = s;
}
return s;
}
}
/**
* required string keyName = 5;
*/
public com.google.protobuf.ByteString
getKeyNameBytes() {
java.lang.Object ref = keyName_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
keyName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required string ezKeyVersionName = 6;
public static final int EZKEYVERSIONNAME_FIELD_NUMBER = 6;
private java.lang.Object ezKeyVersionName_;
/**
* required string ezKeyVersionName = 6;
*/
public boolean hasEzKeyVersionName() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* required string ezKeyVersionName = 6;
*/
public java.lang.String getEzKeyVersionName() {
java.lang.Object ref = ezKeyVersionName_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
ezKeyVersionName_ = s;
}
return s;
}
}
/**
* required string ezKeyVersionName = 6;
*/
public com.google.protobuf.ByteString
getEzKeyVersionNameBytes() {
java.lang.Object ref = ezKeyVersionName_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
ezKeyVersionName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN;
cryptoProtocolVersion_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION;
key_ = com.google.protobuf.ByteString.EMPTY;
iv_ = com.google.protobuf.ByteString.EMPTY;
keyName_ = "";
ezKeyVersionName_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasSuite()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasCryptoProtocolVersion()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasKey()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasIv()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasKeyName()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasEzKeyVersionName()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, suite_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeEnum(2, cryptoProtocolVersion_.getNumber());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, key_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeBytes(4, iv_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeBytes(5, getKeyNameBytes());
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeBytes(6, getEzKeyVersionNameBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, suite_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(2, cryptoProtocolVersion_.getNumber());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, key_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(4, iv_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(5, getKeyNameBytes());
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(6, getEzKeyVersionNameBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto) obj;
boolean result = true;
result = result && (hasSuite() == other.hasSuite());
if (hasSuite()) {
result = result &&
(getSuite() == other.getSuite());
}
result = result && (hasCryptoProtocolVersion() == other.hasCryptoProtocolVersion());
if (hasCryptoProtocolVersion()) {
result = result &&
(getCryptoProtocolVersion() == other.getCryptoProtocolVersion());
}
result = result && (hasKey() == other.hasKey());
if (hasKey()) {
result = result && getKey()
.equals(other.getKey());
}
result = result && (hasIv() == other.hasIv());
if (hasIv()) {
result = result && getIv()
.equals(other.getIv());
}
result = result && (hasKeyName() == other.hasKeyName());
if (hasKeyName()) {
result = result && getKeyName()
.equals(other.getKeyName());
}
result = result && (hasEzKeyVersionName() == other.hasEzKeyVersionName());
if (hasEzKeyVersionName()) {
result = result && getEzKeyVersionName()
.equals(other.getEzKeyVersionName());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasSuite()) {
hash = (37 * hash) + SUITE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getSuite());
}
if (hasCryptoProtocolVersion()) {
hash = (37 * hash) + CRYPTOPROTOCOLVERSION_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getCryptoProtocolVersion());
}
if (hasKey()) {
hash = (37 * hash) + KEY_FIELD_NUMBER;
hash = (53 * hash) + getKey().hashCode();
}
if (hasIv()) {
hash = (37 * hash) + IV_FIELD_NUMBER;
hash = (53 * hash) + getIv().hashCode();
}
if (hasKeyName()) {
hash = (37 * hash) + KEYNAME_FIELD_NUMBER;
hash = (53 * hash) + getKeyName().hashCode();
}
if (hasEzKeyVersionName()) {
hash = (37 * hash) + EZKEYVERSIONNAME_FIELD_NUMBER;
hash = (53 * hash) + getEzKeyVersionName().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Protobuf type {@code hadoop.hdfs.FileEncryptionInfoProto}
 *
 * <pre>
 * Encryption information for a file.
 * </pre>
 */
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN;
bitField0_ = (bitField0_ & ~0x00000001);
cryptoProtocolVersion_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION;
bitField0_ = (bitField0_ & ~0x00000002);
key_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000004);
iv_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000008);
keyName_ = "";
bitField0_ = (bitField0_ & ~0x00000010);
ezKeyVersionName_ = "";
bitField0_ = (bitField0_ & ~0x00000020);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.suite_ = suite_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.cryptoProtocolVersion_ = cryptoProtocolVersion_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.key_ = key_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.iv_ = iv_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.keyName_ = keyName_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000020;
}
result.ezKeyVersionName_ = ezKeyVersionName_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance()) return this;
if (other.hasSuite()) {
setSuite(other.getSuite());
}
if (other.hasCryptoProtocolVersion()) {
setCryptoProtocolVersion(other.getCryptoProtocolVersion());
}
if (other.hasKey()) {
setKey(other.getKey());
}
if (other.hasIv()) {
setIv(other.getIv());
}
if (other.hasKeyName()) {
bitField0_ |= 0x00000010;
keyName_ = other.keyName_;
onChanged();
}
if (other.hasEzKeyVersionName()) {
bitField0_ |= 0x00000020;
ezKeyVersionName_ = other.ezKeyVersionName_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasSuite()) {
return false;
}
if (!hasCryptoProtocolVersion()) {
return false;
}
if (!hasKey()) {
return false;
}
if (!hasIv()) {
return false;
}
if (!hasKeyName()) {
return false;
}
if (!hasEzKeyVersionName()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.CipherSuiteProto suite = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN;
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public boolean hasSuite() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() {
return suite_;
}
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public Builder setSuite(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
suite_ = value;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public Builder clearSuite() {
bitField0_ = (bitField0_ & ~0x00000001);
suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN;
onChanged();
return this;
}
// required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto cryptoProtocolVersion_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION;
/**
* required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
*/
public boolean hasCryptoProtocolVersion() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion() {
return cryptoProtocolVersion_;
}
/**
* required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
*/
public Builder setCryptoProtocolVersion(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
cryptoProtocolVersion_ = value;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
*/
public Builder clearCryptoProtocolVersion() {
bitField0_ = (bitField0_ & ~0x00000002);
cryptoProtocolVersion_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION;
onChanged();
return this;
}
// required bytes key = 3;
private com.google.protobuf.ByteString key_ = com.google.protobuf.ByteString.EMPTY;
/**
* required bytes key = 3;
*/
public boolean hasKey() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required bytes key = 3;
*/
public com.google.protobuf.ByteString getKey() {
return key_;
}
/**
* required bytes key = 3;
*/
public Builder setKey(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
key_ = value;
onChanged();
return this;
}
/**
* required bytes key = 3;
*/
public Builder clearKey() {
bitField0_ = (bitField0_ & ~0x00000004);
key_ = getDefaultInstance().getKey();
onChanged();
return this;
}
// required bytes iv = 4;
private com.google.protobuf.ByteString iv_ = com.google.protobuf.ByteString.EMPTY;
/**
* required bytes iv = 4;
*/
public boolean hasIv() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required bytes iv = 4;
*/
public com.google.protobuf.ByteString getIv() {
return iv_;
}
/**
* required bytes iv = 4;
*/
public Builder setIv(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
iv_ = value;
onChanged();
return this;
}
/**
* required bytes iv = 4;
*/
public Builder clearIv() {
bitField0_ = (bitField0_ & ~0x00000008);
iv_ = getDefaultInstance().getIv();
onChanged();
return this;
}
// required string keyName = 5;
private java.lang.Object keyName_ = "";
/**
* required string keyName = 5;
*/
public boolean hasKeyName() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required string keyName = 5;
*/
public java.lang.String getKeyName() {
java.lang.Object ref = keyName_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
keyName_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string keyName = 5;
*/
public com.google.protobuf.ByteString
getKeyNameBytes() {
java.lang.Object ref = keyName_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
keyName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string keyName = 5;
*/
public Builder setKeyName(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000010;
keyName_ = value;
onChanged();
return this;
}
/**
* required string keyName = 5;
*/
public Builder clearKeyName() {
bitField0_ = (bitField0_ & ~0x00000010);
keyName_ = getDefaultInstance().getKeyName();
onChanged();
return this;
}
/**
* required string keyName = 5;
*/
public Builder setKeyNameBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000010;
keyName_ = value;
onChanged();
return this;
}
// required string ezKeyVersionName = 6;
private java.lang.Object ezKeyVersionName_ = "";
/**
* required string ezKeyVersionName = 6;
*/
public boolean hasEzKeyVersionName() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* required string ezKeyVersionName = 6;
*/
public java.lang.String getEzKeyVersionName() {
java.lang.Object ref = ezKeyVersionName_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
ezKeyVersionName_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string ezKeyVersionName = 6;
*/
public com.google.protobuf.ByteString
getEzKeyVersionNameBytes() {
java.lang.Object ref = ezKeyVersionName_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
ezKeyVersionName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string ezKeyVersionName = 6;
*/
public Builder setEzKeyVersionName(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000020;
ezKeyVersionName_ = value;
onChanged();
return this;
}
/**
* required string ezKeyVersionName = 6;
*/
public Builder clearEzKeyVersionName() {
bitField0_ = (bitField0_ & ~0x00000020);
ezKeyVersionName_ = getDefaultInstance().getEzKeyVersionName();
onChanged();
return this;
}
/**
* required string ezKeyVersionName = 6;
*/
public Builder setEzKeyVersionNameBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000020;
ezKeyVersionName_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.FileEncryptionInfoProto)
}
static {
defaultInstance = new FileEncryptionInfoProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.FileEncryptionInfoProto)
}
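// Illustrative usage, not part of the generated code: a minimal sketch of
// populating FileEncryptionInfoProto through its builder. It assumes field 1
// is the cipher suite (as in the sibling ZoneEncryptionInfoProto) and that a
// CryptoProtocolVersionProto.ENCRYPTION_ZONES constant is defined earlier in
// this file; the byte arrays and names are hypothetical placeholders.
//
//   HdfsProtos.FileEncryptionInfoProto feInfo =
//       HdfsProtos.FileEncryptionInfoProto.newBuilder()
//           .setSuite(HdfsProtos.CipherSuiteProto.AES_CTR_NOPADDING)
//           .setCryptoProtocolVersion(
//               HdfsProtos.CryptoProtocolVersionProto.ENCRYPTION_ZONES)
//           .setKey(com.google.protobuf.ByteString.copyFrom(new byte[16]))
//           .setIv(com.google.protobuf.ByteString.copyFrom(new byte[16]))
//           .setKeyName("myKey")                // hypothetical key name
//           .setEzKeyVersionName("myKey@0")     // hypothetical version name
//           .build();
//
// build() throws UninitializedMessageException if any required field is unset.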
public interface PerFileEncryptionInfoProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required bytes key = 1;
/**
* required bytes key = 1;
*/
boolean hasKey();
/**
* required bytes key = 1;
*/
com.google.protobuf.ByteString getKey();
// required bytes iv = 2;
/**
* required bytes iv = 2;
*/
boolean hasIv();
/**
* required bytes iv = 2;
*/
com.google.protobuf.ByteString getIv();
// required string ezKeyVersionName = 3;
/**
* required string ezKeyVersionName = 3;
*/
boolean hasEzKeyVersionName();
/**
* required string ezKeyVersionName = 3;
*/
java.lang.String getEzKeyVersionName();
/**
* required string ezKeyVersionName = 3;
*/
com.google.protobuf.ByteString
getEzKeyVersionNameBytes();
}
/**
 * Protobuf type {@code hadoop.hdfs.PerFileEncryptionInfoProto}
 *
 * <pre>
 * Encryption information for an individual
 * file within an encryption zone
 * </pre>
 */
public static final class PerFileEncryptionInfoProto extends
com.google.protobuf.GeneratedMessage
implements PerFileEncryptionInfoProtoOrBuilder {
// Use PerFileEncryptionInfoProto.newBuilder() to construct.
private PerFileEncryptionInfoProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private PerFileEncryptionInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final PerFileEncryptionInfoProto defaultInstance;
public static PerFileEncryptionInfoProto getDefaultInstance() {
return defaultInstance;
}
public PerFileEncryptionInfoProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private PerFileEncryptionInfoProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
key_ = input.readBytes();
break;
}
case 18: {
bitField0_ |= 0x00000002;
iv_ = input.readBytes();
break;
}
case 26: {
bitField0_ |= 0x00000004;
ezKeyVersionName_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.Builder.class);
}
public static com.google.protobuf.Parser<PerFileEncryptionInfoProto> PARSER =
new com.google.protobuf.AbstractParser<PerFileEncryptionInfoProto>() {
public PerFileEncryptionInfoProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new PerFileEncryptionInfoProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<PerFileEncryptionInfoProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required bytes key = 1;
public static final int KEY_FIELD_NUMBER = 1;
private com.google.protobuf.ByteString key_;
/**
* required bytes key = 1;
*/
public boolean hasKey() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required bytes key = 1;
*/
public com.google.protobuf.ByteString getKey() {
return key_;
}
// required bytes iv = 2;
public static final int IV_FIELD_NUMBER = 2;
private com.google.protobuf.ByteString iv_;
/**
* required bytes iv = 2;
*/
public boolean hasIv() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required bytes iv = 2;
*/
public com.google.protobuf.ByteString getIv() {
return iv_;
}
// required string ezKeyVersionName = 3;
public static final int EZKEYVERSIONNAME_FIELD_NUMBER = 3;
private java.lang.Object ezKeyVersionName_;
/**
* required string ezKeyVersionName = 3;
*/
public boolean hasEzKeyVersionName() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required string ezKeyVersionName = 3;
*/
public java.lang.String getEzKeyVersionName() {
java.lang.Object ref = ezKeyVersionName_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
ezKeyVersionName_ = s;
}
return s;
}
}
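// Note: ezKeyVersionName_ holds either a String or a ByteString. The getter
// above decodes lazily and caches the decoded String only when the bytes are
// valid UTF-8, so repeated calls avoid re-decoding the same field.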
/**
* required string ezKeyVersionName = 3;
*/
public com.google.protobuf.ByteString
getEzKeyVersionNameBytes() {
java.lang.Object ref = ezKeyVersionName_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
ezKeyVersionName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
key_ = com.google.protobuf.ByteString.EMPTY;
iv_ = com.google.protobuf.ByteString.EMPTY;
ezKeyVersionName_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasKey()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasIv()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasEzKeyVersionName()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
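// All three fields are declared required, so isInitialized() reports true
// only once key, iv and ezKeyVersionName have each been set; Builder.build()
// relies on this check. Illustrative sketch, not generated code:
//
//   // Throws UninitializedMessageException: ezKeyVersionName is missing.
//   HdfsProtos.PerFileEncryptionInfoProto.newBuilder()
//       .setKey(com.google.protobuf.ByteString.copyFromUtf8("k"))
//       .setIv(com.google.protobuf.ByteString.copyFromUtf8("iv"))
//       .build();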
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, key_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, iv_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, getEzKeyVersionNameBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, key_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, iv_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, getEzKeyVersionNameBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
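// Note: getSerializedSize() memoizes its result in memoizedSerializedSize,
// and writeTo() invokes it first, so per-field sizes are computed once per
// message instance rather than on every write.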
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto) obj;
boolean result = true;
result = result && (hasKey() == other.hasKey());
if (hasKey()) {
result = result && getKey()
.equals(other.getKey());
}
result = result && (hasIv() == other.hasIv());
if (hasIv()) {
result = result && getIv()
.equals(other.getIv());
}
result = result && (hasEzKeyVersionName() == other.hasEzKeyVersionName());
if (hasEzKeyVersionName()) {
result = result && getEzKeyVersionName()
.equals(other.getEzKeyVersionName());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasKey()) {
hash = (37 * hash) + KEY_FIELD_NUMBER;
hash = (53 * hash) + getKey().hashCode();
}
if (hasIv()) {
hash = (37 * hash) + IV_FIELD_NUMBER;
hash = (53 * hash) + getIv().hashCode();
}
if (hasEzKeyVersionName()) {
hash = (37 * hash) + EZKEYVERSIONNAME_FIELD_NUMBER;
hash = (53 * hash) + getEzKeyVersionName().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
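// Illustrative: the parseDelimitedFrom overloads above pair with
// MessageLite.writeDelimitedTo when several messages share one stream; each
// message is preceded by a varint length prefix. Sketch with hypothetical
// `out`/`in` streams:
//
//   info.writeDelimitedTo(out);                              // one message
//   HdfsProtos.PerFileEncryptionInfoProto next =
//       HdfsProtos.PerFileEncryptionInfoProto.parseDelimitedFrom(in);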
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Protobuf type {@code hadoop.hdfs.PerFileEncryptionInfoProto}
 *
 * <pre>
 * Encryption information for an individual
 * file within an encryption zone
 * </pre>
 */
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
key_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000001);
iv_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000002);
ezKeyVersionName_ = "";
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.key_ = key_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.iv_ = iv_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.ezKeyVersionName_ = ezKeyVersionName_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.getDefaultInstance()) return this;
if (other.hasKey()) {
setKey(other.getKey());
}
if (other.hasIv()) {
setIv(other.getIv());
}
if (other.hasEzKeyVersionName()) {
bitField0_ |= 0x00000004;
ezKeyVersionName_ = other.ezKeyVersionName_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
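// Merge semantics above: every field present on `other` overwrites this
// builder's current value (these scalar and string fields replace rather
// than combine), and the two messages' unknown fields are merged.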
public final boolean isInitialized() {
if (!hasKey()) {
return false;
}
if (!hasIv()) {
return false;
}
if (!hasEzKeyVersionName()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required bytes key = 1;
private com.google.protobuf.ByteString key_ = com.google.protobuf.ByteString.EMPTY;
/**
* required bytes key = 1;
*/
public boolean hasKey() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required bytes key = 1;
*/
public com.google.protobuf.ByteString getKey() {
return key_;
}
/**
* required bytes key = 1;
*/
public Builder setKey(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
key_ = value;
onChanged();
return this;
}
/**
* required bytes key = 1;
*/
public Builder clearKey() {
bitField0_ = (bitField0_ & ~0x00000001);
key_ = getDefaultInstance().getKey();
onChanged();
return this;
}
// required bytes iv = 2;
private com.google.protobuf.ByteString iv_ = com.google.protobuf.ByteString.EMPTY;
/**
* required bytes iv = 2;
*/
public boolean hasIv() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required bytes iv = 2;
*/
public com.google.protobuf.ByteString getIv() {
return iv_;
}
/**
* required bytes iv = 2;
*/
public Builder setIv(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
iv_ = value;
onChanged();
return this;
}
/**
* required bytes iv = 2;
*/
public Builder clearIv() {
bitField0_ = (bitField0_ & ~0x00000002);
iv_ = getDefaultInstance().getIv();
onChanged();
return this;
}
// required string ezKeyVersionName = 3;
private java.lang.Object ezKeyVersionName_ = "";
/**
* required string ezKeyVersionName = 3;
*/
public boolean hasEzKeyVersionName() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required string ezKeyVersionName = 3;
*/
public java.lang.String getEzKeyVersionName() {
java.lang.Object ref = ezKeyVersionName_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
ezKeyVersionName_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string ezKeyVersionName = 3;
*/
public com.google.protobuf.ByteString
getEzKeyVersionNameBytes() {
java.lang.Object ref = ezKeyVersionName_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
ezKeyVersionName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string ezKeyVersionName = 3;
*/
public Builder setEzKeyVersionName(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
ezKeyVersionName_ = value;
onChanged();
return this;
}
/**
* required string ezKeyVersionName = 3;
*/
public Builder clearEzKeyVersionName() {
bitField0_ = (bitField0_ & ~0x00000004);
ezKeyVersionName_ = getDefaultInstance().getEzKeyVersionName();
onChanged();
return this;
}
/**
* required string ezKeyVersionName = 3;
*/
public Builder setEzKeyVersionNameBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
ezKeyVersionName_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.PerFileEncryptionInfoProto)
}
static {
defaultInstance = new PerFileEncryptionInfoProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.PerFileEncryptionInfoProto)
}
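// Illustrative round trip, not part of the generated code; byte values are
// hypothetical placeholders.
//
//   HdfsProtos.PerFileEncryptionInfoProto info =
//       HdfsProtos.PerFileEncryptionInfoProto.newBuilder()
//           .setKey(com.google.protobuf.ByteString.copyFrom(new byte[16]))
//           .setIv(com.google.protobuf.ByteString.copyFrom(new byte[16]))
//           .setEzKeyVersionName("myKey@0")
//           .build();
//   byte[] wire = info.toByteArray();
//   HdfsProtos.PerFileEncryptionInfoProto parsed =
//       HdfsProtos.PerFileEncryptionInfoProto.parseFrom(wire);
//   assert parsed.equals(info);  // value equality, as defined above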
public interface ZoneEncryptionInfoProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.CipherSuiteProto suite = 1;
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
boolean hasSuite();
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite();
// required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
/**
* required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
*/
boolean hasCryptoProtocolVersion();
/**
* required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion();
// required string keyName = 3;
/**
* required string keyName = 3;
*/
boolean hasKeyName();
/**
* required string keyName = 3;
*/
java.lang.String getKeyName();
/**
* required string keyName = 3;
*/
com.google.protobuf.ByteString
getKeyNameBytes();
}
/**
 * Protobuf type {@code hadoop.hdfs.ZoneEncryptionInfoProto}
 *
 * <pre>
 * Encryption information for an encryption
 * zone
 * </pre>
 */
public static final class ZoneEncryptionInfoProto extends
com.google.protobuf.GeneratedMessage
implements ZoneEncryptionInfoProtoOrBuilder {
// Use ZoneEncryptionInfoProto.newBuilder() to construct.
private ZoneEncryptionInfoProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ZoneEncryptionInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ZoneEncryptionInfoProto defaultInstance;
public static ZoneEncryptionInfoProto getDefaultInstance() {
return defaultInstance;
}
public ZoneEncryptionInfoProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ZoneEncryptionInfoProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
suite_ = value;
}
break;
}
case 16: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(2, rawValue);
} else {
bitField0_ |= 0x00000002;
cryptoProtocolVersion_ = value;
}
break;
}
case 26: {
bitField0_ |= 0x00000004;
keyName_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
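// Note on the enum cases above: an unrecognized enum number on the wire is
// not dropped. valueOf(rawValue) returns null for it, and the raw varint is
// preserved in unknownFields via mergeVarintField, so re-serializing the
// message keeps the original bytes intact.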
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.Builder.class);
}
public static com.google.protobuf.Parser<ZoneEncryptionInfoProto> PARSER =
new com.google.protobuf.AbstractParser<ZoneEncryptionInfoProto>() {
public ZoneEncryptionInfoProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ZoneEncryptionInfoProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<ZoneEncryptionInfoProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.CipherSuiteProto suite = 1;
public static final int SUITE_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto suite_;
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public boolean hasSuite() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() {
return suite_;
}
// required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
public static final int CRYPTOPROTOCOLVERSION_FIELD_NUMBER = 2;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto cryptoProtocolVersion_;
/**
* required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
*/
public boolean hasCryptoProtocolVersion() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion() {
return cryptoProtocolVersion_;
}
// required string keyName = 3;
public static final int KEYNAME_FIELD_NUMBER = 3;
private java.lang.Object keyName_;
/**
* required string keyName = 3;
*/
public boolean hasKeyName() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required string keyName = 3;
*/
public java.lang.String getKeyName() {
java.lang.Object ref = keyName_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
keyName_ = s;
}
return s;
}
}
/**
* required string keyName = 3;
*/
public com.google.protobuf.ByteString
getKeyNameBytes() {
java.lang.Object ref = keyName_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
keyName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN;
cryptoProtocolVersion_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION;
keyName_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasSuite()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasCryptoProtocolVersion()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasKeyName()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, suite_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeEnum(2, cryptoProtocolVersion_.getNumber());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, getKeyNameBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, suite_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(2, cryptoProtocolVersion_.getNumber());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, getKeyNameBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto) obj;
boolean result = true;
result = result && (hasSuite() == other.hasSuite());
if (hasSuite()) {
result = result &&
(getSuite() == other.getSuite());
}
result = result && (hasCryptoProtocolVersion() == other.hasCryptoProtocolVersion());
if (hasCryptoProtocolVersion()) {
result = result &&
(getCryptoProtocolVersion() == other.getCryptoProtocolVersion());
}
result = result && (hasKeyName() == other.hasKeyName());
if (hasKeyName()) {
result = result && getKeyName()
.equals(other.getKeyName());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasSuite()) {
hash = (37 * hash) + SUITE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getSuite());
}
if (hasCryptoProtocolVersion()) {
hash = (37 * hash) + CRYPTOPROTOCOLVERSION_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getCryptoProtocolVersion());
}
if (hasKeyName()) {
hash = (37 * hash) + KEYNAME_FIELD_NUMBER;
hash = (53 * hash) + getKeyName().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Protobuf type {@code hadoop.hdfs.ZoneEncryptionInfoProto}
 *
 * <pre>
 * Encryption information for an encryption
 * zone
 * </pre>
 */
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN;
bitField0_ = (bitField0_ & ~0x00000001);
cryptoProtocolVersion_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION;
bitField0_ = (bitField0_ & ~0x00000002);
keyName_ = "";
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.suite_ = suite_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.cryptoProtocolVersion_ = cryptoProtocolVersion_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.keyName_ = keyName_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.getDefaultInstance()) return this;
if (other.hasSuite()) {
setSuite(other.getSuite());
}
if (other.hasCryptoProtocolVersion()) {
setCryptoProtocolVersion(other.getCryptoProtocolVersion());
}
if (other.hasKeyName()) {
bitField0_ |= 0x00000004;
keyName_ = other.keyName_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasSuite()) {
return false;
}
if (!hasCryptoProtocolVersion()) {
return false;
}
if (!hasKeyName()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.CipherSuiteProto suite = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN;
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public boolean hasSuite() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() {
return suite_;
}
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public Builder setSuite(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
suite_ = value;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public Builder clearSuite() {
bitField0_ = (bitField0_ & ~0x00000001);
suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN;
onChanged();
return this;
}
// required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto cryptoProtocolVersion_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION;
/**
* required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
*/
public boolean hasCryptoProtocolVersion() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion() {
return cryptoProtocolVersion_;
}
/**
* required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
*/
public Builder setCryptoProtocolVersion(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
cryptoProtocolVersion_ = value;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
*/
public Builder clearCryptoProtocolVersion() {
bitField0_ = (bitField0_ & ~0x00000002);
cryptoProtocolVersion_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION;
onChanged();
return this;
}
// required string keyName = 3;
private java.lang.Object keyName_ = "";
/**
* required string keyName = 3;
*/
public boolean hasKeyName() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required string keyName = 3;
*/
public java.lang.String getKeyName() {
java.lang.Object ref = keyName_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
keyName_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string keyName = 3;
*/
public com.google.protobuf.ByteString
getKeyNameBytes() {
java.lang.Object ref = keyName_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
keyName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string keyName = 3;
*/
public Builder setKeyName(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
keyName_ = value;
onChanged();
return this;
}
/**
* required string keyName = 3;
*/
public Builder clearKeyName() {
bitField0_ = (bitField0_ & ~0x00000004);
keyName_ = getDefaultInstance().getKeyName();
onChanged();
return this;
}
/**
* required string keyName = 3;
*/
public Builder setKeyNameBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
keyName_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ZoneEncryptionInfoProto)
}
static {
defaultInstance = new ZoneEncryptionInfoProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ZoneEncryptionInfoProto)
}
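// Illustrative usage, not part of the generated code: the zone-level message
// carries only the suite, protocol version and key name shared by a zone.
// ENCRYPTION_ZONES is assumed to be defined earlier in this file; the key
// name is a hypothetical placeholder.
//
//   HdfsProtos.ZoneEncryptionInfoProto zone =
//       HdfsProtos.ZoneEncryptionInfoProto.newBuilder()
//           .setSuite(HdfsProtos.CipherSuiteProto.AES_CTR_NOPADDING)
//           .setCryptoProtocolVersion(
//               HdfsProtos.CryptoProtocolVersionProto.ENCRYPTION_ZONES)
//           .setKeyName("myKey")
//           .build();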
public interface CipherOptionProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.CipherSuiteProto suite = 1;
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
boolean hasSuite();
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite();
// optional bytes inKey = 2;
/**
* optional bytes inKey = 2;
*/
boolean hasInKey();
/**
* optional bytes inKey = 2;
*/
com.google.protobuf.ByteString getInKey();
// optional bytes inIv = 3;
/**
* optional bytes inIv = 3;
*/
boolean hasInIv();
/**
* optional bytes inIv = 3;
*/
com.google.protobuf.ByteString getInIv();
// optional bytes outKey = 4;
/**
* optional bytes outKey = 4;
*/
boolean hasOutKey();
/**
* optional bytes outKey = 4;
*/
com.google.protobuf.ByteString getOutKey();
// optional bytes outIv = 5;
/**
* optional bytes outIv = 5;
*/
boolean hasOutIv();
/**
* optional bytes outIv = 5;
*/
com.google.protobuf.ByteString getOutIv();
}
/**
 * Protobuf type {@code hadoop.hdfs.CipherOptionProto}
 *
 * <pre>
 * Cipher option
 * </pre>
 */
public static final class CipherOptionProto extends
com.google.protobuf.GeneratedMessage
implements CipherOptionProtoOrBuilder {
// Use CipherOptionProto.newBuilder() to construct.
private CipherOptionProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private CipherOptionProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final CipherOptionProto defaultInstance;
public static CipherOptionProto getDefaultInstance() {
return defaultInstance;
}
public CipherOptionProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private CipherOptionProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
suite_ = value;
}
break;
}
case 18: {
bitField0_ |= 0x00000002;
inKey_ = input.readBytes();
break;
}
case 26: {
bitField0_ |= 0x00000004;
inIv_ = input.readBytes();
break;
}
case 34: {
bitField0_ |= 0x00000008;
outKey_ = input.readBytes();
break;
}
case 42: {
bitField0_ |= 0x00000010;
outIv_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder.class);
}
public static com.google.protobuf.Parser<CipherOptionProto> PARSER =
new com.google.protobuf.AbstractParser<CipherOptionProto>() {
public CipherOptionProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new CipherOptionProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<CipherOptionProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.CipherSuiteProto suite = 1;
public static final int SUITE_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto suite_;
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public boolean hasSuite() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() {
return suite_;
}
// optional bytes inKey = 2;
public static final int INKEY_FIELD_NUMBER = 2;
private com.google.protobuf.ByteString inKey_;
/**
* optional bytes inKey = 2;
*/
public boolean hasInKey() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional bytes inKey = 2;
*/
public com.google.protobuf.ByteString getInKey() {
return inKey_;
}
// optional bytes inIv = 3;
public static final int INIV_FIELD_NUMBER = 3;
private com.google.protobuf.ByteString inIv_;
/**
* optional bytes inIv = 3;
*/
public boolean hasInIv() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional bytes inIv = 3;
*/
public com.google.protobuf.ByteString getInIv() {
return inIv_;
}
// optional bytes outKey = 4;
public static final int OUTKEY_FIELD_NUMBER = 4;
private com.google.protobuf.ByteString outKey_;
/**
* optional bytes outKey = 4;
*/
public boolean hasOutKey() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional bytes outKey = 4;
*/
public com.google.protobuf.ByteString getOutKey() {
return outKey_;
}
// optional bytes outIv = 5;
public static final int OUTIV_FIELD_NUMBER = 5;
private com.google.protobuf.ByteString outIv_;
/**
* optional bytes outIv = 5;
*/
public boolean hasOutIv() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional bytes outIv = 5;
*/
public com.google.protobuf.ByteString getOutIv() {
return outIv_;
}
private void initFields() {
suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN;
inKey_ = com.google.protobuf.ByteString.EMPTY;
inIv_ = com.google.protobuf.ByteString.EMPTY;
outKey_ = com.google.protobuf.ByteString.EMPTY;
outIv_ = com.google.protobuf.ByteString.EMPTY;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasSuite()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
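// Only `suite` is required in this message; the four bytes fields are
// optional. Illustrative sketch (not generated code), assuming the builder
// accessors that follow: a CipherOptionProto carrying just the negotiated
// suite is already valid.
//
//   HdfsProtos.CipherOptionProto opt =
//       HdfsProtos.CipherOptionProto.newBuilder()
//           .setSuite(HdfsProtos.CipherSuiteProto.AES_CTR_NOPADDING)
//           .build();
//   boolean present = opt.hasInKey();  // false: optional field left unset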
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, suite_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, inKey_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, inIv_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeBytes(4, outKey_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeBytes(5, outIv_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, suite_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, inKey_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, inIv_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(4, outKey_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(5, outIv_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto) obj;
boolean result = true;
result = result && (hasSuite() == other.hasSuite());
if (hasSuite()) {
result = result &&
(getSuite() == other.getSuite());
}
result = result && (hasInKey() == other.hasInKey());
if (hasInKey()) {
result = result && getInKey()
.equals(other.getInKey());
}
result = result && (hasInIv() == other.hasInIv());
if (hasInIv()) {
result = result && getInIv()
.equals(other.getInIv());
}
result = result && (hasOutKey() == other.hasOutKey());
if (hasOutKey()) {
result = result && getOutKey()
.equals(other.getOutKey());
}
result = result && (hasOutIv() == other.hasOutIv());
if (hasOutIv()) {
result = result && getOutIv()
.equals(other.getOutIv());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasSuite()) {
hash = (37 * hash) + SUITE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getSuite());
}
if (hasInKey()) {
hash = (37 * hash) + INKEY_FIELD_NUMBER;
hash = (53 * hash) + getInKey().hashCode();
}
if (hasInIv()) {
hash = (37 * hash) + INIV_FIELD_NUMBER;
hash = (53 * hash) + getInIv().hashCode();
}
if (hasOutKey()) {
hash = (37 * hash) + OUTKEY_FIELD_NUMBER;
hash = (53 * hash) + getOutKey().hashCode();
}
if (hasOutIv()) {
hash = (37 * hash) + OUTIV_FIELD_NUMBER;
hash = (53 * hash) + getOutIv().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.CipherOptionProto}
*
* <pre>
**
* Cipher option
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN;
bitField0_ = (bitField0_ & ~0x00000001);
inKey_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000002);
inIv_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000004);
outKey_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000008);
outIv_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.suite_ = suite_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.inKey_ = inKey_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.inIv_ = inIv_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.outKey_ = outKey_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.outIv_ = outIv_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.getDefaultInstance()) return this;
if (other.hasSuite()) {
setSuite(other.getSuite());
}
if (other.hasInKey()) {
setInKey(other.getInKey());
}
if (other.hasInIv()) {
setInIv(other.getInIv());
}
if (other.hasOutKey()) {
setOutKey(other.getOutKey());
}
if (other.hasOutIv()) {
setOutIv(other.getOutIv());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasSuite()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.CipherSuiteProto suite = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN;
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public boolean hasSuite() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() {
return suite_;
}
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public Builder setSuite(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
suite_ = value;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public Builder clearSuite() {
bitField0_ = (bitField0_ & ~0x00000001);
suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN;
onChanged();
return this;
}
// optional bytes inKey = 2;
private com.google.protobuf.ByteString inKey_ = com.google.protobuf.ByteString.EMPTY;
/**
* optional bytes inKey = 2;
*/
public boolean hasInKey() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional bytes inKey = 2;
*/
public com.google.protobuf.ByteString getInKey() {
return inKey_;
}
/**
* optional bytes inKey = 2;
*/
public Builder setInKey(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
inKey_ = value;
onChanged();
return this;
}
/**
* optional bytes inKey = 2;
*/
public Builder clearInKey() {
bitField0_ = (bitField0_ & ~0x00000002);
inKey_ = getDefaultInstance().getInKey();
onChanged();
return this;
}
// optional bytes inIv = 3;
private com.google.protobuf.ByteString inIv_ = com.google.protobuf.ByteString.EMPTY;
/**
* optional bytes inIv = 3;
*/
public boolean hasInIv() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional bytes inIv = 3;
*/
public com.google.protobuf.ByteString getInIv() {
return inIv_;
}
/**
* optional bytes inIv = 3;
*/
public Builder setInIv(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
inIv_ = value;
onChanged();
return this;
}
/**
* optional bytes inIv = 3;
*/
public Builder clearInIv() {
bitField0_ = (bitField0_ & ~0x00000004);
inIv_ = getDefaultInstance().getInIv();
onChanged();
return this;
}
// optional bytes outKey = 4;
private com.google.protobuf.ByteString outKey_ = com.google.protobuf.ByteString.EMPTY;
/**
* optional bytes outKey = 4;
*/
public boolean hasOutKey() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional bytes outKey = 4;
*/
public com.google.protobuf.ByteString getOutKey() {
return outKey_;
}
/**
* optional bytes outKey = 4;
*/
public Builder setOutKey(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
outKey_ = value;
onChanged();
return this;
}
/**
* optional bytes outKey = 4;
*/
public Builder clearOutKey() {
bitField0_ = (bitField0_ & ~0x00000008);
outKey_ = getDefaultInstance().getOutKey();
onChanged();
return this;
}
// optional bytes outIv = 5;
private com.google.protobuf.ByteString outIv_ = com.google.protobuf.ByteString.EMPTY;
/**
* optional bytes outIv = 5;
*/
public boolean hasOutIv() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional bytes outIv = 5;
*/
public com.google.protobuf.ByteString getOutIv() {
return outIv_;
}
/**
* optional bytes outIv = 5;
*/
public Builder setOutIv(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000010;
outIv_ = value;
onChanged();
return this;
}
/**
* optional bytes outIv = 5;
*/
public Builder clearOutIv() {
bitField0_ = (bitField0_ & ~0x00000010);
outIv_ = getDefaultInstance().getOutIv();
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.CipherOptionProto)
}
static {
defaultInstance = new CipherOptionProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.CipherOptionProto)
}
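// Usage sketch (illustrative; not part of the protoc output above): a minimal
// round trip through the generated CipherOptionProto API, assuming protobuf 2.5
// semantics. The key/IV sizes here are hypothetical placeholder values.
//
//   HdfsProtos.CipherOptionProto opt = HdfsProtos.CipherOptionProto.newBuilder()
//       .setSuite(HdfsProtos.CipherSuiteProto.AES_CTR_NOPADDING)
//       .setInKey(com.google.protobuf.ByteString.copyFrom(new byte[16]))
//       .setInIv(com.google.protobuf.ByteString.copyFrom(new byte[16]))
//       .build();                        // build() throws if required 'suite' is unset
//   byte[] wire = opt.toByteArray();     // serialized via writeTo/getSerializedSize
//   HdfsProtos.CipherOptionProto parsed =
//       HdfsProtos.CipherOptionProto.parseFrom(wire);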
public interface LocatedBlocksProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required uint64 fileLength = 1;
/**
* required uint64 fileLength = 1;
*/
boolean hasFileLength();
/**
* required uint64 fileLength = 1;
*/
long getFileLength();
// repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto>
getBlocksList();
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index);
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
int getBlocksCount();
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>
getBlocksOrBuilderList();
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder(
int index);
// required bool underConstruction = 3;
/**
* required bool underConstruction = 3;
*/
boolean hasUnderConstruction();
/**
* required bool underConstruction = 3;
*/
boolean getUnderConstruction();
// optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
boolean hasLastBlock();
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getLastBlock();
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getLastBlockOrBuilder();
// required bool isLastBlockComplete = 5;
/**
* required bool isLastBlockComplete = 5;
*/
boolean hasIsLastBlockComplete();
/**
* required bool isLastBlockComplete = 5;
*/
boolean getIsLastBlockComplete();
// optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
boolean hasFileEncryptionInfo();
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo();
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder();
}
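// Usage sketch (illustrative; not part of the protoc output above): the message
// class below and its Builder both implement this read-only interface, so a
// hypothetical helper can accept either one without forcing a build():
//
//   static int totalBlocks(HdfsProtos.LocatedBlocksProtoOrBuilder lb) {
//     return lb.getBlocksCount();        // works for message and builder alike
//   }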
/**
* Protobuf type {@code hadoop.hdfs.LocatedBlocksProto}
*
* <pre>
**
* A set of file blocks and their locations.
* </pre>
*/
public static final class LocatedBlocksProto extends
com.google.protobuf.GeneratedMessage
implements LocatedBlocksProtoOrBuilder {
// Use LocatedBlocksProto.newBuilder() to construct.
private LocatedBlocksProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private LocatedBlocksProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final LocatedBlocksProto defaultInstance;
public static LocatedBlocksProto getDefaultInstance() {
return defaultInstance;
}
public LocatedBlocksProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private LocatedBlocksProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
fileLength_ = input.readUInt64();
break;
}
case 18: {
if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto>();
mutable_bitField0_ |= 0x00000002;
}
blocks_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.PARSER, extensionRegistry));
break;
}
case 24: {
bitField0_ |= 0x00000002;
underConstruction_ = input.readBool();
break;
}
case 34: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) == 0x00000004)) {
subBuilder = lastBlock_.toBuilder();
}
lastBlock_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(lastBlock_);
lastBlock_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
case 40: {
bitField0_ |= 0x00000008;
isLastBlockComplete_ = input.readBool();
break;
}
case 50: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000010) == 0x00000010)) {
subBuilder = fileEncryptionInfo_.toBuilder();
}
fileEncryptionInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(fileEncryptionInfo_);
fileEncryptionInfo_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000010;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
blocks_ = java.util.Collections.unmodifiableList(blocks_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder.class);
}
public static com.google.protobuf.Parser<LocatedBlocksProto> PARSER =
new com.google.protobuf.AbstractParser<LocatedBlocksProto>() {
public LocatedBlocksProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new LocatedBlocksProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<LocatedBlocksProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required uint64 fileLength = 1;
public static final int FILELENGTH_FIELD_NUMBER = 1;
private long fileLength_;
/**
* required uint64 fileLength = 1;
*/
public boolean hasFileLength() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint64 fileLength = 1;
*/
public long getFileLength() {
return fileLength_;
}
// repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
public static final int BLOCKS_FIELD_NUMBER = 2;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> blocks_;
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> getBlocksList() {
return blocks_;
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>
getBlocksOrBuilderList() {
return blocks_;
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public int getBlocksCount() {
return blocks_.size();
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index) {
return blocks_.get(index);
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder(
int index) {
return blocks_.get(index);
}
// required bool underConstruction = 3;
public static final int UNDERCONSTRUCTION_FIELD_NUMBER = 3;
private boolean underConstruction_;
/**
* required bool underConstruction = 3;
*/
public boolean hasUnderConstruction() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required bool underConstruction = 3;
*/
public boolean getUnderConstruction() {
return underConstruction_;
}
// optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
public static final int LASTBLOCK_FIELD_NUMBER = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto lastBlock_;
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
public boolean hasLastBlock() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getLastBlock() {
return lastBlock_;
}
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getLastBlockOrBuilder() {
return lastBlock_;
}
// required bool isLastBlockComplete = 5;
public static final int ISLASTBLOCKCOMPLETE_FIELD_NUMBER = 5;
private boolean isLastBlockComplete_;
/**
* required bool isLastBlockComplete = 5;
*/
public boolean hasIsLastBlockComplete() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required bool isLastBlockComplete = 5;
*/
public boolean getIsLastBlockComplete() {
return isLastBlockComplete_;
}
// optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
public static final int FILEENCRYPTIONINFO_FIELD_NUMBER = 6;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto fileEncryptionInfo_;
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
public boolean hasFileEncryptionInfo() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo() {
return fileEncryptionInfo_;
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder() {
return fileEncryptionInfo_;
}
private void initFields() {
fileLength_ = 0L;
blocks_ = java.util.Collections.emptyList();
underConstruction_ = false;
lastBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance();
isLastBlockComplete_ = false;
fileEncryptionInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasFileLength()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasUnderConstruction()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasIsLastBlockComplete()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getBlocksCount(); i++) {
if (!getBlocks(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasLastBlock()) {
if (!getLastBlock().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasFileEncryptionInfo()) {
if (!getFileEncryptionInfo().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt64(1, fileLength_);
}
for (int i = 0; i < blocks_.size(); i++) {
output.writeMessage(2, blocks_.get(i));
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBool(3, underConstruction_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeMessage(4, lastBlock_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeBool(5, isLastBlockComplete_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeMessage(6, fileEncryptionInfo_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(1, fileLength_);
}
for (int i = 0; i < blocks_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, blocks_.get(i));
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(3, underConstruction_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(4, lastBlock_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(5, isLastBlockComplete_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(6, fileEncryptionInfo_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto) obj;
boolean result = true;
result = result && (hasFileLength() == other.hasFileLength());
if (hasFileLength()) {
result = result && (getFileLength()
== other.getFileLength());
}
result = result && getBlocksList()
.equals(other.getBlocksList());
result = result && (hasUnderConstruction() == other.hasUnderConstruction());
if (hasUnderConstruction()) {
result = result && (getUnderConstruction()
== other.getUnderConstruction());
}
result = result && (hasLastBlock() == other.hasLastBlock());
if (hasLastBlock()) {
result = result && getLastBlock()
.equals(other.getLastBlock());
}
result = result && (hasIsLastBlockComplete() == other.hasIsLastBlockComplete());
if (hasIsLastBlockComplete()) {
result = result && (getIsLastBlockComplete()
== other.getIsLastBlockComplete());
}
result = result && (hasFileEncryptionInfo() == other.hasFileEncryptionInfo());
if (hasFileEncryptionInfo()) {
result = result && getFileEncryptionInfo()
.equals(other.getFileEncryptionInfo());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasFileLength()) {
hash = (37 * hash) + FILELENGTH_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getFileLength());
}
if (getBlocksCount() > 0) {
hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
hash = (53 * hash) + getBlocksList().hashCode();
}
if (hasUnderConstruction()) {
hash = (37 * hash) + UNDERCONSTRUCTION_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getUnderConstruction());
}
if (hasLastBlock()) {
hash = (37 * hash) + LASTBLOCK_FIELD_NUMBER;
hash = (53 * hash) + getLastBlock().hashCode();
}
if (hasIsLastBlockComplete()) {
hash = (37 * hash) + ISLASTBLOCKCOMPLETE_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getIsLastBlockComplete());
}
if (hasFileEncryptionInfo()) {
hash = (37 * hash) + FILEENCRYPTIONINFO_FIELD_NUMBER;
hash = (53 * hash) + getFileEncryptionInfo().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.LocatedBlocksProto}
*
* <pre>
**
* A set of file blocks and their locations.
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getBlocksFieldBuilder();
getLastBlockFieldBuilder();
getFileEncryptionInfoFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
fileLength_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
if (blocksBuilder_ == null) {
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
} else {
blocksBuilder_.clear();
}
underConstruction_ = false;
bitField0_ = (bitField0_ & ~0x00000004);
if (lastBlockBuilder_ == null) {
lastBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance();
} else {
lastBlockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
isLastBlockComplete_ = false;
bitField0_ = (bitField0_ & ~0x00000010);
if (fileEncryptionInfoBuilder_ == null) {
fileEncryptionInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance();
} else {
fileEncryptionInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000020);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.fileLength_ = fileLength_;
if (blocksBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002)) {
blocks_ = java.util.Collections.unmodifiableList(blocks_);
bitField0_ = (bitField0_ & ~0x00000002);
}
result.blocks_ = blocks_;
} else {
result.blocks_ = blocksBuilder_.build();
}
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000002;
}
result.underConstruction_ = underConstruction_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000004;
}
if (lastBlockBuilder_ == null) {
result.lastBlock_ = lastBlock_;
} else {
result.lastBlock_ = lastBlockBuilder_.build();
}
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000008;
}
result.isLastBlockComplete_ = isLastBlockComplete_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000010;
}
if (fileEncryptionInfoBuilder_ == null) {
result.fileEncryptionInfo_ = fileEncryptionInfo_;
} else {
result.fileEncryptionInfo_ = fileEncryptionInfoBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance()) return this;
if (other.hasFileLength()) {
setFileLength(other.getFileLength());
}
if (blocksBuilder_ == null) {
if (!other.blocks_.isEmpty()) {
if (blocks_.isEmpty()) {
blocks_ = other.blocks_;
bitField0_ = (bitField0_ & ~0x00000002);
} else {
ensureBlocksIsMutable();
blocks_.addAll(other.blocks_);
}
onChanged();
}
} else {
if (!other.blocks_.isEmpty()) {
if (blocksBuilder_.isEmpty()) {
blocksBuilder_.dispose();
blocksBuilder_ = null;
blocks_ = other.blocks_;
bitField0_ = (bitField0_ & ~0x00000002);
blocksBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getBlocksFieldBuilder() : null;
} else {
blocksBuilder_.addAllMessages(other.blocks_);
}
}
}
if (other.hasUnderConstruction()) {
setUnderConstruction(other.getUnderConstruction());
}
if (other.hasLastBlock()) {
mergeLastBlock(other.getLastBlock());
}
if (other.hasIsLastBlockComplete()) {
setIsLastBlockComplete(other.getIsLastBlockComplete());
}
if (other.hasFileEncryptionInfo()) {
mergeFileEncryptionInfo(other.getFileEncryptionInfo());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasFileLength()) {
return false;
}
if (!hasUnderConstruction()) {
return false;
}
if (!hasIsLastBlockComplete()) {
return false;
}
for (int i = 0; i < getBlocksCount(); i++) {
if (!getBlocks(i).isInitialized()) {
return false;
}
}
if (hasLastBlock()) {
if (!getLastBlock().isInitialized()) {
return false;
}
}
if (hasFileEncryptionInfo()) {
if (!getFileEncryptionInfo().isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required uint64 fileLength = 1;
private long fileLength_ ;
/**
* required uint64 fileLength = 1;
*/
public boolean hasFileLength() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint64 fileLength = 1;
*/
public long getFileLength() {
return fileLength_;
}
/**
* required uint64 fileLength = 1;
*/
public Builder setFileLength(long value) {
bitField0_ |= 0x00000001;
fileLength_ = value;
onChanged();
return this;
}
/**
* required uint64 fileLength = 1;
*/
public Builder clearFileLength() {
bitField0_ = (bitField0_ & ~0x00000001);
fileLength_ = 0L;
onChanged();
return this;
}
// repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> blocks_ =
java.util.Collections.emptyList();
private void ensureBlocksIsMutable() {
if (!((bitField0_ & 0x00000002) == 0x00000002)) {
blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto>(blocks_);
bitField0_ |= 0x00000002;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> blocksBuilder_;
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> getBlocksList() {
if (blocksBuilder_ == null) {
return java.util.Collections.unmodifiableList(blocks_);
} else {
return blocksBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public int getBlocksCount() {
if (blocksBuilder_ == null) {
return blocks_.size();
} else {
return blocksBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index) {
if (blocksBuilder_ == null) {
return blocks_.get(index);
} else {
return blocksBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public Builder setBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.set(index, value);
onChanged();
} else {
blocksBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public Builder setBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.set(index, builderForValue.build());
onChanged();
} else {
blocksBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.add(value);
onChanged();
} else {
blocksBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public Builder addBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.add(index, value);
onChanged();
} else {
blocksBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public Builder addBlocks(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.add(builderForValue.build());
onChanged();
} else {
blocksBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public Builder addBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.add(index, builderForValue.build());
onChanged();
} else {
blocksBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public Builder addAllBlocks(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> values) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
super.addAll(values, blocks_);
onChanged();
} else {
blocksBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public Builder clearBlocks() {
if (blocksBuilder_ == null) {
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
} else {
blocksBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public Builder removeBlocks(int index) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.remove(index);
onChanged();
} else {
blocksBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getBlocksBuilder(
int index) {
return getBlocksFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder(
int index) {
if (blocksBuilder_ == null) {
return blocks_.get(index); } else {
return blocksBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>
getBlocksOrBuilderList() {
if (blocksBuilder_ != null) {
return blocksBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(blocks_);
}
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder addBlocksBuilder() {
return getBlocksFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder addBlocksBuilder(
int index) {
return getBlocksFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder>
getBlocksBuilderList() {
return getBlocksFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>
getBlocksFieldBuilder() {
if (blocksBuilder_ == null) {
blocksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>(
blocks_,
((bitField0_ & 0x00000002) == 0x00000002),
getParentForChildren(),
isClean());
blocks_ = null;
}
return blocksBuilder_;
}
// required bool underConstruction = 3;
private boolean underConstruction_ ;
/**
* required bool underConstruction = 3;
*/
public boolean hasUnderConstruction() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required bool underConstruction = 3;
*/
public boolean getUnderConstruction() {
return underConstruction_;
}
/**
* required bool underConstruction = 3;
*/
public Builder setUnderConstruction(boolean value) {
bitField0_ |= 0x00000004;
underConstruction_ = value;
onChanged();
return this;
}
/**
* required bool underConstruction = 3;
*/
public Builder clearUnderConstruction() {
bitField0_ = (bitField0_ & ~0x00000004);
underConstruction_ = false;
onChanged();
return this;
}
// optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto lastBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> lastBlockBuilder_;
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
public boolean hasLastBlock() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getLastBlock() {
if (lastBlockBuilder_ == null) {
return lastBlock_;
} else {
return lastBlockBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
public Builder setLastBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
if (lastBlockBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
lastBlock_ = value;
onChanged();
} else {
lastBlockBuilder_.setMessage(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
public Builder setLastBlock(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) {
if (lastBlockBuilder_ == null) {
lastBlock_ = builderForValue.build();
onChanged();
} else {
lastBlockBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
public Builder mergeLastBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
if (lastBlockBuilder_ == null) {
if (((bitField0_ & 0x00000008) == 0x00000008) &&
lastBlock_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()) {
lastBlock_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder(lastBlock_).mergeFrom(value).buildPartial();
} else {
lastBlock_ = value;
}
onChanged();
} else {
lastBlockBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
public Builder clearLastBlock() {
if (lastBlockBuilder_ == null) {
lastBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance();
onChanged();
} else {
lastBlockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getLastBlockBuilder() {
bitField0_ |= 0x00000008;
onChanged();
return getLastBlockFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getLastBlockOrBuilder() {
if (lastBlockBuilder_ != null) {
return lastBlockBuilder_.getMessageOrBuilder();
} else {
return lastBlock_;
}
}
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>
getLastBlockFieldBuilder() {
if (lastBlockBuilder_ == null) {
lastBlockBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>(
lastBlock_,
getParentForChildren(),
isClean());
lastBlock_ = null;
}
return lastBlockBuilder_;
}
// required bool isLastBlockComplete = 5;
private boolean isLastBlockComplete_ ;
/**
* required bool isLastBlockComplete = 5;
*/
public boolean hasIsLastBlockComplete() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required bool isLastBlockComplete = 5;
*/
public boolean getIsLastBlockComplete() {
return isLastBlockComplete_;
}
/**
* required bool isLastBlockComplete = 5;
*/
public Builder setIsLastBlockComplete(boolean value) {
bitField0_ |= 0x00000010;
isLastBlockComplete_ = value;
onChanged();
return this;
}
/**
* required bool isLastBlockComplete = 5;
*/
public Builder clearIsLastBlockComplete() {
bitField0_ = (bitField0_ & ~0x00000010);
isLastBlockComplete_ = false;
onChanged();
return this;
}
// optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto fileEncryptionInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder> fileEncryptionInfoBuilder_;
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
public boolean hasFileEncryptionInfo() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo() {
if (fileEncryptionInfoBuilder_ == null) {
return fileEncryptionInfo_;
} else {
return fileEncryptionInfoBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
public Builder setFileEncryptionInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto value) {
if (fileEncryptionInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
fileEncryptionInfo_ = value;
onChanged();
} else {
fileEncryptionInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000020;
return this;
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
public Builder setFileEncryptionInfo(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder builderForValue) {
if (fileEncryptionInfoBuilder_ == null) {
fileEncryptionInfo_ = builderForValue.build();
onChanged();
} else {
fileEncryptionInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000020;
return this;
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
public Builder mergeFileEncryptionInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto value) {
if (fileEncryptionInfoBuilder_ == null) {
if (((bitField0_ & 0x00000020) == 0x00000020) &&
fileEncryptionInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance()) {
fileEncryptionInfo_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.newBuilder(fileEncryptionInfo_).mergeFrom(value).buildPartial();
} else {
fileEncryptionInfo_ = value;
}
onChanged();
} else {
fileEncryptionInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000020;
return this;
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
public Builder clearFileEncryptionInfo() {
if (fileEncryptionInfoBuilder_ == null) {
fileEncryptionInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance();
onChanged();
} else {
fileEncryptionInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000020);
return this;
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder getFileEncryptionInfoBuilder() {
bitField0_ |= 0x00000020;
onChanged();
return getFileEncryptionInfoFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder() {
if (fileEncryptionInfoBuilder_ != null) {
return fileEncryptionInfoBuilder_.getMessageOrBuilder();
} else {
return fileEncryptionInfo_;
}
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder>
getFileEncryptionInfoFieldBuilder() {
if (fileEncryptionInfoBuilder_ == null) {
fileEncryptionInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder>(
fileEncryptionInfo_,
getParentForChildren(),
isClean());
fileEncryptionInfo_ = null;
}
return fileEncryptionInfoBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.LocatedBlocksProto)
}
static {
defaultInstance = new LocatedBlocksProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.LocatedBlocksProto)
}
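// Usage sketch (illustrative; not part of the protoc output above): composing a
// LocatedBlocksProto with the builder API, assuming 'block' is a
// LocatedBlockProto obtained elsewhere and the file length is a placeholder.
//
//   HdfsProtos.LocatedBlocksProto located = HdfsProtos.LocatedBlocksProto.newBuilder()
//       .setFileLength(134217728L)       // required
//       .addBlocks(block)                // repeated: one entry per located block
//       .setUnderConstruction(false)     // required
//       .setIsLastBlockComplete(true)    // required
//       .build();
//   if (located.hasFileEncryptionInfo()) {
//     // optional submessage: guard with the has* accessor before reading
//     HdfsProtos.FileEncryptionInfoProto fei = located.getFileEncryptionInfo();
//   }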
public interface HdfsFileStatusProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;
/**
* required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;
*/
boolean hasFileType();
/**
* required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType getFileType();
// required bytes path = 2;
/**
* required bytes path = 2;
*
* <pre>
* local name of inode encoded java UTF8
* </pre>
*/
boolean hasPath();
/**
* required bytes path = 2;
*
* <pre>
* local name of inode encoded java UTF8
* </pre>
*/
com.google.protobuf.ByteString getPath();
// required uint64 length = 3;
/**
* required uint64 length = 3;
*/
boolean hasLength();
/**
* required uint64 length = 3;
*/
long getLength();
// required .hadoop.hdfs.FsPermissionProto permission = 4;
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
boolean hasPermission();
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getPermission();
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder();
// required string owner = 5;
/**
* required string owner = 5;
*/
boolean hasOwner();
/**
* required string owner = 5;
*/
java.lang.String getOwner();
/**
* required string owner = 5;
*/
com.google.protobuf.ByteString
getOwnerBytes();
// required string group = 6;
/**
* required string group = 6;
*/
boolean hasGroup();
/**
* required string group = 6;
*/
java.lang.String getGroup();
/**
* required string group = 6;
*/
com.google.protobuf.ByteString
getGroupBytes();
// required uint64 modification_time = 7;
/**
* required uint64 modification_time = 7;
*/
boolean hasModificationTime();
/**
* required uint64 modification_time = 7;
*/
long getModificationTime();
// required uint64 access_time = 8;
/**
* required uint64 access_time = 8;
*/
boolean hasAccessTime();
/**
* required uint64 access_time = 8;
*/
long getAccessTime();
// optional bytes symlink = 9;
/**
* optional bytes symlink = 9;
*
*
* Optional fields for symlink
*
*/
boolean hasSymlink();
/**
* optional bytes symlink = 9;
*
*
* Optional fields for symlink
*
*/
com.google.protobuf.ByteString getSymlink();
// optional uint32 block_replication = 10 [default = 0];
/**
* optional uint32 block_replication = 10 [default = 0];
*
*
* Optional fields for file
*
*/
boolean hasBlockReplication();
/**
* optional uint32 block_replication = 10 [default = 0];
*
*
* Optional fields for file
*
*/
int getBlockReplication();
// optional uint64 blocksize = 11 [default = 0];
/**
* optional uint64 blocksize = 11 [default = 0];
*/
boolean hasBlocksize();
/**
* optional uint64 blocksize = 11 [default = 0];
*/
long getBlocksize();
// optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* supplied only if asked by client
*
*/
boolean hasLocations();
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* supplied only if asked by client
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations();
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* supplied only if asked by client
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder();
// optional uint64 fileId = 13 [default = 0];
/**
* optional uint64 fileId = 13 [default = 0];
*
*
* Optional field for fileId
*
*/
boolean hasFileId();
/**
* optional uint64 fileId = 13 [default = 0];
*
*
* Optional field for fileId
*
*/
long getFileId();
// optional int32 childrenNum = 14 [default = -1];
/**
* optional int32 childrenNum = 14 [default = -1];
*/
boolean hasChildrenNum();
/**
* optional int32 childrenNum = 14 [default = -1];
*/
int getChildrenNum();
// optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
*
*
* Optional field for file encryption
*
*/
boolean hasFileEncryptionInfo();
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
*
*
* Optional field for file encryption
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo();
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
*
*
* Optional field for file encryption
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder();
// optional uint32 storagePolicy = 16 [default = 0];
/**
* optional uint32 storagePolicy = 16 [default = 0];
*
*
* block storage policy id
*
*/
boolean hasStoragePolicy();
/**
* optional uint32 storagePolicy = 16 [default = 0];
*
*
* block storage policy id
*
*/
int getStoragePolicy();
}
/**
* Protobuf type {@code hadoop.hdfs.HdfsFileStatusProto}
*
*
**
* Status of a file, directory or symlink
* Optionally includes a file's block locations if requested by client on the rpc call.
*
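* A minimal decoding sketch (hedged: the byte[] "data" below is an assumed
* placeholder for a serialized HdfsFileStatusProto obtained elsewhere):
*
*   HdfsFileStatusProto status = HdfsFileStatusProto.parseFrom(data);
*   if (status.hasLocations()) {
*     LocatedBlocksProto blocks = status.getLocations();
*   }
*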
*/
public static final class HdfsFileStatusProto extends
com.google.protobuf.GeneratedMessage
implements HdfsFileStatusProtoOrBuilder {
// Use HdfsFileStatusProto.newBuilder() to construct.
private HdfsFileStatusProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private HdfsFileStatusProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final HdfsFileStatusProto defaultInstance;
public static HdfsFileStatusProto getDefaultInstance() {
return defaultInstance;
}
public HdfsFileStatusProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
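// Wire-format parsing constructor: reads tags until tag 0 (end of input).
// Each case value below encodes (field_number << 3) | wire_type, e.g.
// case 8 is field 1 as a varint and case 122 is field 15 length-delimited.
// Unrecognized tags (and unknown enum numbers) are kept in unknownFields.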
private HdfsFileStatusProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
fileType_ = value;
}
break;
}
case 18: {
bitField0_ |= 0x00000002;
path_ = input.readBytes();
break;
}
case 24: {
bitField0_ |= 0x00000004;
length_ = input.readUInt64();
break;
}
case 34: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000008) == 0x00000008)) {
subBuilder = permission_.toBuilder();
}
permission_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(permission_);
permission_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000008;
break;
}
case 42: {
bitField0_ |= 0x00000010;
owner_ = input.readBytes();
break;
}
case 50: {
bitField0_ |= 0x00000020;
group_ = input.readBytes();
break;
}
case 56: {
bitField0_ |= 0x00000040;
modificationTime_ = input.readUInt64();
break;
}
case 64: {
bitField0_ |= 0x00000080;
accessTime_ = input.readUInt64();
break;
}
case 74: {
bitField0_ |= 0x00000100;
symlink_ = input.readBytes();
break;
}
case 80: {
bitField0_ |= 0x00000200;
blockReplication_ = input.readUInt32();
break;
}
case 88: {
bitField0_ |= 0x00000400;
blocksize_ = input.readUInt64();
break;
}
case 98: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000800) == 0x00000800)) {
subBuilder = locations_.toBuilder();
}
locations_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(locations_);
locations_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000800;
break;
}
case 104: {
bitField0_ |= 0x00001000;
fileId_ = input.readUInt64();
break;
}
case 112: {
bitField0_ |= 0x00002000;
childrenNum_ = input.readInt32();
break;
}
case 122: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00004000) == 0x00004000)) {
subBuilder = fileEncryptionInfo_.toBuilder();
}
fileEncryptionInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(fileEncryptionInfo_);
fileEncryptionInfo_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00004000;
break;
}
case 128: {
bitField0_ |= 0x00008000;
storagePolicy_ = input.readUInt32();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder.class);
}
public static com.google.protobuf.Parser<HdfsFileStatusProto> PARSER =
new com.google.protobuf.AbstractParser<HdfsFileStatusProto>() {
public HdfsFileStatusProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new HdfsFileStatusProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<HdfsFileStatusProto> getParserForType() {
return PARSER;
}
/**
* Protobuf enum {@code hadoop.hdfs.HdfsFileStatusProto.FileType}
*/
public enum FileType
implements com.google.protobuf.ProtocolMessageEnum {
/**
* IS_DIR = 1;
*/
IS_DIR(0, 1),
/**
* IS_FILE = 2;
*/
IS_FILE(1, 2),
/**
* IS_SYMLINK = 3;
*/
IS_SYMLINK(2, 3),
;
/**
* IS_DIR = 1;
*/
public static final int IS_DIR_VALUE = 1;
/**
* IS_FILE = 2;
*/
public static final int IS_FILE_VALUE = 2;
/**
* IS_SYMLINK = 3;
*/
public static final int IS_SYMLINK_VALUE = 3;
public final int getNumber() { return value; }
public static FileType valueOf(int value) {
switch (value) {
case 1: return IS_DIR;
case 2: return IS_FILE;
case 3: return IS_SYMLINK;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<FileType>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<FileType>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<FileType>() {
public FileType findValueByNumber(int number) {
return FileType.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDescriptor().getEnumTypes().get(0);
}
private static final FileType[] VALUES = values();
public static FileType valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private FileType(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.HdfsFileStatusProto.FileType)
}
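// Note: FileType.valueOf(int) returns null for numbers outside 1..3; the
// parsing constructor uses that to route unknown enum values into
// unknownFields instead of failing. Round-trip sketch:
//   FileType t = FileType.valueOf(2);        // IS_FILE
//   int n = FileType.IS_SYMLINK.getNumber(); // 3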
private int bitField0_;
// required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;
public static final int FILETYPE_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType fileType_;
/**
* required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;
*/
public boolean hasFileType() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType getFileType() {
return fileType_;
}
// required bytes path = 2;
public static final int PATH_FIELD_NUMBER = 2;
private com.google.protobuf.ByteString path_;
/**
* required bytes path = 2;
*
*
* local name of inode, encoded in Java UTF-8
*
*/
public boolean hasPath() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required bytes path = 2;
*
*
* local name of inode, encoded in Java UTF-8
*
*/
public com.google.protobuf.ByteString getPath() {
return path_;
}
// required uint64 length = 3;
public static final int LENGTH_FIELD_NUMBER = 3;
private long length_;
/**
* required uint64 length = 3;
*/
public boolean hasLength() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 length = 3;
*/
public long getLength() {
return length_;
}
// required .hadoop.hdfs.FsPermissionProto permission = 4;
public static final int PERMISSION_FIELD_NUMBER = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto permission_;
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
public boolean hasPermission() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getPermission() {
return permission_;
}
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() {
return permission_;
}
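// The owner and group fields below are stored as java.lang.Object holding
// either a decoded String or the raw ByteString from the wire; the getters
// decode lazily and cache the String form only when the bytes are valid UTF-8.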
// required string owner = 5;
public static final int OWNER_FIELD_NUMBER = 5;
private java.lang.Object owner_;
/**
* required string owner = 5;
*/
public boolean hasOwner() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required string owner = 5;
*/
public java.lang.String getOwner() {
java.lang.Object ref = owner_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
owner_ = s;
}
return s;
}
}
/**
* required string owner = 5;
*/
public com.google.protobuf.ByteString
getOwnerBytes() {
java.lang.Object ref = owner_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
owner_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required string group = 6;
public static final int GROUP_FIELD_NUMBER = 6;
private java.lang.Object group_;
/**
* required string group = 6;
*/
public boolean hasGroup() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* required string group = 6;
*/
public java.lang.String getGroup() {
java.lang.Object ref = group_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
group_ = s;
}
return s;
}
}
/**
* required string group = 6;
*/
public com.google.protobuf.ByteString
getGroupBytes() {
java.lang.Object ref = group_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
group_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required uint64 modification_time = 7;
public static final int MODIFICATION_TIME_FIELD_NUMBER = 7;
private long modificationTime_;
/**
* required uint64 modification_time = 7;
*/
public boolean hasModificationTime() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* required uint64 modification_time = 7;
*/
public long getModificationTime() {
return modificationTime_;
}
// required uint64 access_time = 8;
public static final int ACCESS_TIME_FIELD_NUMBER = 8;
private long accessTime_;
/**
* required uint64 access_time = 8;
*/
public boolean hasAccessTime() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* required uint64 access_time = 8;
*/
public long getAccessTime() {
return accessTime_;
}
// optional bytes symlink = 9;
public static final int SYMLINK_FIELD_NUMBER = 9;
private com.google.protobuf.ByteString symlink_;
/**
* optional bytes symlink = 9;
*
*
* Optional fields for symlink
*
*/
public boolean hasSymlink() {
return ((bitField0_ & 0x00000100) == 0x00000100);
}
/**
* optional bytes symlink = 9;
*
*
* Optional fields for symlink
*
*/
public com.google.protobuf.ByteString getSymlink() {
return symlink_;
}
// optional uint32 block_replication = 10 [default = 0];
public static final int BLOCK_REPLICATION_FIELD_NUMBER = 10;
private int blockReplication_;
/**
* optional uint32 block_replication = 10 [default = 0];
*
*
* Optional fields for file
*
*/
public boolean hasBlockReplication() {
return ((bitField0_ & 0x00000200) == 0x00000200);
}
/**
* optional uint32 block_replication = 10 [default = 0];
*
*
* Optional fields for file
*
*/
public int getBlockReplication() {
return blockReplication_;
}
// optional uint64 blocksize = 11 [default = 0];
public static final int BLOCKSIZE_FIELD_NUMBER = 11;
private long blocksize_;
/**
* optional uint64 blocksize = 11 [default = 0];
*/
public boolean hasBlocksize() {
return ((bitField0_ & 0x00000400) == 0x00000400);
}
/**
* optional uint64 blocksize = 11 [default = 0];
*/
public long getBlocksize() {
return blocksize_;
}
// optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
public static final int LOCATIONS_FIELD_NUMBER = 12;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto locations_;
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* supplied only if asked by client
*
*/
public boolean hasLocations() {
return ((bitField0_ & 0x00000800) == 0x00000800);
}
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* supplied only if asked by client
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations() {
return locations_;
}
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* supplied only if asked by client
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder() {
return locations_;
}
// optional uint64 fileId = 13 [default = 0];
public static final int FILEID_FIELD_NUMBER = 13;
private long fileId_;
/**
* optional uint64 fileId = 13 [default = 0];
*
*
* Optional field for fileId
*
*/
public boolean hasFileId() {
return ((bitField0_ & 0x00001000) == 0x00001000);
}
/**
* optional uint64 fileId = 13 [default = 0];
*
*
* Optional field for fileId
*
*/
public long getFileId() {
return fileId_;
}
// optional int32 childrenNum = 14 [default = -1];
public static final int CHILDRENNUM_FIELD_NUMBER = 14;
private int childrenNum_;
/**
* optional int32 childrenNum = 14 [default = -1];
*/
public boolean hasChildrenNum() {
return ((bitField0_ & 0x00002000) == 0x00002000);
}
/**
* optional int32 childrenNum = 14 [default = -1];
*/
public int getChildrenNum() {
return childrenNum_;
}
// optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
public static final int FILEENCRYPTIONINFO_FIELD_NUMBER = 15;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto fileEncryptionInfo_;
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
*
*
* Optional field for file encryption
*
*/
public boolean hasFileEncryptionInfo() {
return ((bitField0_ & 0x00004000) == 0x00004000);
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
*
*
* Optional field for file encryption
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo() {
return fileEncryptionInfo_;
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
*
*
* Optional field for file encryption
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder() {
return fileEncryptionInfo_;
}
// optional uint32 storagePolicy = 16 [default = 0];
public static final int STORAGEPOLICY_FIELD_NUMBER = 16;
private int storagePolicy_;
/**
* optional uint32 storagePolicy = 16 [default = 0];
*
*
* block storage policy id
*
*/
public boolean hasStoragePolicy() {
return ((bitField0_ & 0x00008000) == 0x00008000);
}
/**
* optional uint32 storagePolicy = 16 [default = 0];
*
*
* block storage policy id
*
*/
public int getStoragePolicy() {
return storagePolicy_;
}
private void initFields() {
fileType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.IS_DIR;
path_ = com.google.protobuf.ByteString.EMPTY;
length_ = 0L;
permission_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance();
owner_ = "";
group_ = "";
modificationTime_ = 0L;
accessTime_ = 0L;
symlink_ = com.google.protobuf.ByteString.EMPTY;
blockReplication_ = 0;
blocksize_ = 0L;
locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance();
fileId_ = 0L;
childrenNum_ = -1;
fileEncryptionInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance();
storagePolicy_ = 0;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasFileType()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasPath()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasLength()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasPermission()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasOwner()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasGroup()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasModificationTime()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasAccessTime()) {
memoizedIsInitialized = 0;
return false;
}
if (!getPermission().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
if (hasLocations()) {
if (!getLocations().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasFileEncryptionInfo()) {
if (!getFileEncryptionInfo().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, fileType_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, path_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt64(3, length_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeMessage(4, permission_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeBytes(5, getOwnerBytes());
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeBytes(6, getGroupBytes());
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeUInt64(7, modificationTime_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
output.writeUInt64(8, accessTime_);
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
output.writeBytes(9, symlink_);
}
if (((bitField0_ & 0x00000200) == 0x00000200)) {
output.writeUInt32(10, blockReplication_);
}
if (((bitField0_ & 0x00000400) == 0x00000400)) {
output.writeUInt64(11, blocksize_);
}
if (((bitField0_ & 0x00000800) == 0x00000800)) {
output.writeMessage(12, locations_);
}
if (((bitField0_ & 0x00001000) == 0x00001000)) {
output.writeUInt64(13, fileId_);
}
if (((bitField0_ & 0x00002000) == 0x00002000)) {
output.writeInt32(14, childrenNum_);
}
if (((bitField0_ & 0x00004000) == 0x00004000)) {
output.writeMessage(15, fileEncryptionInfo_);
}
if (((bitField0_ & 0x00008000) == 0x00008000)) {
output.writeUInt32(16, storagePolicy_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, fileType_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, path_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, length_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(4, permission_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(5, getOwnerBytes());
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(6, getGroupBytes());
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(7, modificationTime_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(8, accessTime_);
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(9, symlink_);
}
if (((bitField0_ & 0x00000200) == 0x00000200)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(10, blockReplication_);
}
if (((bitField0_ & 0x00000400) == 0x00000400)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(11, blocksize_);
}
if (((bitField0_ & 0x00000800) == 0x00000800)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(12, locations_);
}
if (((bitField0_ & 0x00001000) == 0x00001000)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(13, fileId_);
}
if (((bitField0_ & 0x00002000) == 0x00002000)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(14, childrenNum_);
}
if (((bitField0_ & 0x00004000) == 0x00004000)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(15, fileEncryptionInfo_);
}
if (((bitField0_ & 0x00008000) == 0x00008000)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(16, storagePolicy_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto) obj;
boolean result = true;
result = result && (hasFileType() == other.hasFileType());
if (hasFileType()) {
result = result &&
(getFileType() == other.getFileType());
}
result = result && (hasPath() == other.hasPath());
if (hasPath()) {
result = result && getPath()
.equals(other.getPath());
}
result = result && (hasLength() == other.hasLength());
if (hasLength()) {
result = result && (getLength()
== other.getLength());
}
result = result && (hasPermission() == other.hasPermission());
if (hasPermission()) {
result = result && getPermission()
.equals(other.getPermission());
}
result = result && (hasOwner() == other.hasOwner());
if (hasOwner()) {
result = result && getOwner()
.equals(other.getOwner());
}
result = result && (hasGroup() == other.hasGroup());
if (hasGroup()) {
result = result && getGroup()
.equals(other.getGroup());
}
result = result && (hasModificationTime() == other.hasModificationTime());
if (hasModificationTime()) {
result = result && (getModificationTime()
== other.getModificationTime());
}
result = result && (hasAccessTime() == other.hasAccessTime());
if (hasAccessTime()) {
result = result && (getAccessTime()
== other.getAccessTime());
}
result = result && (hasSymlink() == other.hasSymlink());
if (hasSymlink()) {
result = result && getSymlink()
.equals(other.getSymlink());
}
result = result && (hasBlockReplication() == other.hasBlockReplication());
if (hasBlockReplication()) {
result = result && (getBlockReplication()
== other.getBlockReplication());
}
result = result && (hasBlocksize() == other.hasBlocksize());
if (hasBlocksize()) {
result = result && (getBlocksize()
== other.getBlocksize());
}
result = result && (hasLocations() == other.hasLocations());
if (hasLocations()) {
result = result && getLocations()
.equals(other.getLocations());
}
result = result && (hasFileId() == other.hasFileId());
if (hasFileId()) {
result = result && (getFileId()
== other.getFileId());
}
result = result && (hasChildrenNum() == other.hasChildrenNum());
if (hasChildrenNum()) {
result = result && (getChildrenNum()
== other.getChildrenNum());
}
result = result && (hasFileEncryptionInfo() == other.hasFileEncryptionInfo());
if (hasFileEncryptionInfo()) {
result = result && getFileEncryptionInfo()
.equals(other.getFileEncryptionInfo());
}
result = result && (hasStoragePolicy() == other.hasStoragePolicy());
if (hasStoragePolicy()) {
result = result && (getStoragePolicy()
== other.getStoragePolicy());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasFileType()) {
hash = (37 * hash) + FILETYPE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getFileType());
}
if (hasPath()) {
hash = (37 * hash) + PATH_FIELD_NUMBER;
hash = (53 * hash) + getPath().hashCode();
}
if (hasLength()) {
hash = (37 * hash) + LENGTH_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getLength());
}
if (hasPermission()) {
hash = (37 * hash) + PERMISSION_FIELD_NUMBER;
hash = (53 * hash) + getPermission().hashCode();
}
if (hasOwner()) {
hash = (37 * hash) + OWNER_FIELD_NUMBER;
hash = (53 * hash) + getOwner().hashCode();
}
if (hasGroup()) {
hash = (37 * hash) + GROUP_FIELD_NUMBER;
hash = (53 * hash) + getGroup().hashCode();
}
if (hasModificationTime()) {
hash = (37 * hash) + MODIFICATION_TIME_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getModificationTime());
}
if (hasAccessTime()) {
hash = (37 * hash) + ACCESS_TIME_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getAccessTime());
}
if (hasSymlink()) {
hash = (37 * hash) + SYMLINK_FIELD_NUMBER;
hash = (53 * hash) + getSymlink().hashCode();
}
if (hasBlockReplication()) {
hash = (37 * hash) + BLOCK_REPLICATION_FIELD_NUMBER;
hash = (53 * hash) + getBlockReplication();
}
if (hasBlocksize()) {
hash = (37 * hash) + BLOCKSIZE_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getBlocksize());
}
if (hasLocations()) {
hash = (37 * hash) + LOCATIONS_FIELD_NUMBER;
hash = (53 * hash) + getLocations().hashCode();
}
if (hasFileId()) {
hash = (37 * hash) + FILEID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getFileId());
}
if (hasChildrenNum()) {
hash = (37 * hash) + CHILDRENNUM_FIELD_NUMBER;
hash = (53 * hash) + getChildrenNum();
}
if (hasFileEncryptionInfo()) {
hash = (37 * hash) + FILEENCRYPTIONINFO_FIELD_NUMBER;
hash = (53 * hash) + getFileEncryptionInfo().hashCode();
}
if (hasStoragePolicy()) {
hash = (37 * hash) + STORAGEPOLICY_FIELD_NUMBER;
hash = (53 * hash) + getStoragePolicy();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
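// All parseFrom/parseDelimitedFrom overloads above delegate to PARSER.
// Usage sketch (the InputStream "in" is an assumed placeholder):
//   HdfsFileStatusProto st = HdfsFileStatusProto.parseDelimitedFrom(in);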
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.HdfsFileStatusProto}
*
*
**
* Status of a file, directory or symlink
* Optionally includes a file's block locations if requested by client on the rpc call.
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getPermissionFieldBuilder();
getLocationsFieldBuilder();
getFileEncryptionInfoFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
fileType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.IS_DIR;
bitField0_ = (bitField0_ & ~0x00000001);
path_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000002);
length_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
if (permissionBuilder_ == null) {
permission_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance();
} else {
permissionBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
owner_ = "";
bitField0_ = (bitField0_ & ~0x00000010);
group_ = "";
bitField0_ = (bitField0_ & ~0x00000020);
modificationTime_ = 0L;
bitField0_ = (bitField0_ & ~0x00000040);
accessTime_ = 0L;
bitField0_ = (bitField0_ & ~0x00000080);
symlink_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000100);
blockReplication_ = 0;
bitField0_ = (bitField0_ & ~0x00000200);
blocksize_ = 0L;
bitField0_ = (bitField0_ & ~0x00000400);
if (locationsBuilder_ == null) {
locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance();
} else {
locationsBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000800);
fileId_ = 0L;
bitField0_ = (bitField0_ & ~0x00001000);
childrenNum_ = -1;
bitField0_ = (bitField0_ & ~0x00002000);
if (fileEncryptionInfoBuilder_ == null) {
fileEncryptionInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance();
} else {
fileEncryptionInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00004000);
storagePolicy_ = 0;
bitField0_ = (bitField0_ & ~0x00008000);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.fileType_ = fileType_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.path_ = path_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.length_ = length_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
if (permissionBuilder_ == null) {
result.permission_ = permission_;
} else {
result.permission_ = permissionBuilder_.build();
}
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.owner_ = owner_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000020;
}
result.group_ = group_;
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000040;
}
result.modificationTime_ = modificationTime_;
if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
to_bitField0_ |= 0x00000080;
}
result.accessTime_ = accessTime_;
if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
to_bitField0_ |= 0x00000100;
}
result.symlink_ = symlink_;
if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
to_bitField0_ |= 0x00000200;
}
result.blockReplication_ = blockReplication_;
if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
to_bitField0_ |= 0x00000400;
}
result.blocksize_ = blocksize_;
if (((from_bitField0_ & 0x00000800) == 0x00000800)) {
to_bitField0_ |= 0x00000800;
}
if (locationsBuilder_ == null) {
result.locations_ = locations_;
} else {
result.locations_ = locationsBuilder_.build();
}
if (((from_bitField0_ & 0x00001000) == 0x00001000)) {
to_bitField0_ |= 0x00001000;
}
result.fileId_ = fileId_;
if (((from_bitField0_ & 0x00002000) == 0x00002000)) {
to_bitField0_ |= 0x00002000;
}
result.childrenNum_ = childrenNum_;
if (((from_bitField0_ & 0x00004000) == 0x00004000)) {
to_bitField0_ |= 0x00004000;
}
if (fileEncryptionInfoBuilder_ == null) {
result.fileEncryptionInfo_ = fileEncryptionInfo_;
} else {
result.fileEncryptionInfo_ = fileEncryptionInfoBuilder_.build();
}
if (((from_bitField0_ & 0x00008000) == 0x00008000)) {
to_bitField0_ |= 0x00008000;
}
result.storagePolicy_ = storagePolicy_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
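// Construction sketch (hedged: the values are illustrative placeholders and
// "perm" stands for a fully initialized FsPermissionProto built elsewhere;
// build() throws if any of the eight required fields is unset):
//   HdfsFileStatusProto st = HdfsFileStatusProto.newBuilder()
//       .setFileType(FileType.IS_FILE)
//       .setPath(com.google.protobuf.ByteString.copyFromUtf8("part-0"))
//       .setLength(1024L)
//       .setPermission(perm)
//       .setOwner("hdfs")
//       .setGroup("hadoop")
//       .setModificationTime(0L)
//       .setAccessTime(0L)
//       .build();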
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()) return this;
if (other.hasFileType()) {
setFileType(other.getFileType());
}
if (other.hasPath()) {
setPath(other.getPath());
}
if (other.hasLength()) {
setLength(other.getLength());
}
if (other.hasPermission()) {
mergePermission(other.getPermission());
}
if (other.hasOwner()) {
bitField0_ |= 0x00000010;
owner_ = other.owner_;
onChanged();
}
if (other.hasGroup()) {
bitField0_ |= 0x00000020;
group_ = other.group_;
onChanged();
}
if (other.hasModificationTime()) {
setModificationTime(other.getModificationTime());
}
if (other.hasAccessTime()) {
setAccessTime(other.getAccessTime());
}
if (other.hasSymlink()) {
setSymlink(other.getSymlink());
}
if (other.hasBlockReplication()) {
setBlockReplication(other.getBlockReplication());
}
if (other.hasBlocksize()) {
setBlocksize(other.getBlocksize());
}
if (other.hasLocations()) {
mergeLocations(other.getLocations());
}
if (other.hasFileId()) {
setFileId(other.getFileId());
}
if (other.hasChildrenNum()) {
setChildrenNum(other.getChildrenNum());
}
if (other.hasFileEncryptionInfo()) {
mergeFileEncryptionInfo(other.getFileEncryptionInfo());
}
if (other.hasStoragePolicy()) {
setStoragePolicy(other.getStoragePolicy());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasFileType()) {
return false;
}
if (!hasPath()) {
return false;
}
if (!hasLength()) {
return false;
}
if (!hasPermission()) {
return false;
}
if (!hasOwner()) {
return false;
}
if (!hasGroup()) {
return false;
}
if (!hasModificationTime()) {
return false;
}
if (!hasAccessTime()) {
return false;
}
if (!getPermission().isInitialized()) {
return false;
}
if (hasLocations()) {
if (!getLocations().isInitialized()) {
return false;
}
}
if (hasFileEncryptionInfo()) {
if (!getFileEncryptionInfo().isInitialized()) {
return false;
}
}
return true;
}
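// The stream-based mergeFrom below preserves partial progress: on an
// InvalidProtocolBufferException the partially parsed message is recovered
// via getUnfinishedMessage() and merged in the finally block before the
// exception propagates.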
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType fileType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.IS_DIR;
/**
* required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;
*/
public boolean hasFileType() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType getFileType() {
return fileType_;
}
/**
* required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;
*/
public Builder setFileType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
fileType_ = value;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;
*/
public Builder clearFileType() {
bitField0_ = (bitField0_ & ~0x00000001);
fileType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.IS_DIR;
onChanged();
return this;
}
// required bytes path = 2;
private com.google.protobuf.ByteString path_ = com.google.protobuf.ByteString.EMPTY;
/**
* required bytes path = 2;
*
*
* local name of inode, encoded in Java UTF-8
*
*/
public boolean hasPath() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required bytes path = 2;
*
*
* local name of inode, encoded in Java UTF-8
*
*/
public com.google.protobuf.ByteString getPath() {
return path_;
}
/**
* required bytes path = 2;
*
*
* local name of inode, encoded in Java UTF-8
*
*/
public Builder setPath(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
path_ = value;
onChanged();
return this;
}
/**
* required bytes path = 2;
*
*
* local name of inode, encoded in Java UTF-8
*
*/
public Builder clearPath() {
bitField0_ = (bitField0_ & ~0x00000002);
path_ = getDefaultInstance().getPath();
onChanged();
return this;
}
// required uint64 length = 3;
private long length_ ;
/**
* required uint64 length = 3;
*/
public boolean hasLength() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 length = 3;
*/
public long getLength() {
return length_;
}
/**
* required uint64 length = 3;
*/
public Builder setLength(long value) {
bitField0_ |= 0x00000004;
length_ = value;
onChanged();
return this;
}
/**
* required uint64 length = 3;
*/
public Builder clearLength() {
bitField0_ = (bitField0_ & ~0x00000004);
length_ = 0L;
onChanged();
return this;
}
// required .hadoop.hdfs.FsPermissionProto permission = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto permission_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder> permissionBuilder_;
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
public boolean hasPermission() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getPermission() {
if (permissionBuilder_ == null) {
return permission_;
} else {
return permissionBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
public Builder setPermission(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto value) {
if (permissionBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
permission_ = value;
onChanged();
} else {
permissionBuilder_.setMessage(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
public Builder setPermission(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder builderForValue) {
if (permissionBuilder_ == null) {
permission_ = builderForValue.build();
onChanged();
} else {
permissionBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000008;
return this;
}
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
public Builder mergePermission(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto value) {
if (permissionBuilder_ == null) {
if (((bitField0_ & 0x00000008) == 0x00000008) &&
permission_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance()) {
permission_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.newBuilder(permission_).mergeFrom(value).buildPartial();
} else {
permission_ = value;
}
onChanged();
} else {
permissionBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
public Builder clearPermission() {
if (permissionBuilder_ == null) {
permission_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance();
onChanged();
} else {
permissionBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder getPermissionBuilder() {
bitField0_ |= 0x00000008;
onChanged();
return getPermissionFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() {
if (permissionBuilder_ != null) {
return permissionBuilder_.getMessageOrBuilder();
} else {
return permission_;
}
}
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder>
getPermissionFieldBuilder() {
if (permissionBuilder_ == null) {
permissionBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder>(
permission_,
getParentForChildren(),
isClean());
permission_ = null;
}
return permissionBuilder_;
}
// required string owner = 5;
private java.lang.Object owner_ = "";
/**
* required string owner = 5;
*/
public boolean hasOwner() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required string owner = 5;
*/
public java.lang.String getOwner() {
java.lang.Object ref = owner_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
owner_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string owner = 5;
*/
public com.google.protobuf.ByteString
getOwnerBytes() {
java.lang.Object ref = owner_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
owner_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string owner = 5;
*/
public Builder setOwner(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000010;
owner_ = value;
onChanged();
return this;
}
/**
* required string owner = 5;
*/
public Builder clearOwner() {
bitField0_ = (bitField0_ & ~0x00000010);
owner_ = getDefaultInstance().getOwner();
onChanged();
return this;
}
/**
* required string owner = 5;
*/
public Builder setOwnerBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000010;
owner_ = value;
onChanged();
return this;
}
// required string group = 6;
private java.lang.Object group_ = "";
/**
* required string group = 6;
*/
public boolean hasGroup() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* required string group = 6;
*/
public java.lang.String getGroup() {
java.lang.Object ref = group_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
group_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string group = 6;
*/
public com.google.protobuf.ByteString
getGroupBytes() {
java.lang.Object ref = group_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
group_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string group = 6;
*/
public Builder setGroup(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000020;
group_ = value;
onChanged();
return this;
}
/**
* required string group = 6;
*/
public Builder clearGroup() {
bitField0_ = (bitField0_ & ~0x00000020);
group_ = getDefaultInstance().getGroup();
onChanged();
return this;
}
/**
* required string group = 6;
*/
public Builder setGroupBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000020;
group_ = value;
onChanged();
return this;
}
// required uint64 modification_time = 7;
private long modificationTime_ ;
/**
* required uint64 modification_time = 7;
*/
public boolean hasModificationTime() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* required uint64 modification_time = 7;
*/
public long getModificationTime() {
return modificationTime_;
}
/**
* required uint64 modification_time = 7;
*/
public Builder setModificationTime(long value) {
bitField0_ |= 0x00000040;
modificationTime_ = value;
onChanged();
return this;
}
/**
* required uint64 modification_time = 7;
*/
public Builder clearModificationTime() {
bitField0_ = (bitField0_ & ~0x00000040);
modificationTime_ = 0L;
onChanged();
return this;
}
// required uint64 access_time = 8;
private long accessTime_ ;
/**
* required uint64 access_time = 8;
*/
public boolean hasAccessTime() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* required uint64 access_time = 8;
*/
public long getAccessTime() {
return accessTime_;
}
/**
* required uint64 access_time = 8;
*/
public Builder setAccessTime(long value) {
bitField0_ |= 0x00000080;
accessTime_ = value;
onChanged();
return this;
}
/**
* required uint64 access_time = 8;
*/
public Builder clearAccessTime() {
bitField0_ = (bitField0_ & ~0x00000080);
accessTime_ = 0L;
onChanged();
return this;
}
// optional bytes symlink = 9;
private com.google.protobuf.ByteString symlink_ = com.google.protobuf.ByteString.EMPTY;
/**
* optional bytes symlink = 9;
*
*
* Optional fields for symlink
*
*/
public boolean hasSymlink() {
return ((bitField0_ & 0x00000100) == 0x00000100);
}
/**
* optional bytes symlink = 9;
*
*
* Optional fields for symlink
*
*/
public com.google.protobuf.ByteString getSymlink() {
return symlink_;
}
/**
* optional bytes symlink = 9;
*
*
* Optional fields for symlink
*
*/
public Builder setSymlink(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000100;
symlink_ = value;
onChanged();
return this;
}
/**
* optional bytes symlink = 9;
*
*
* Optional fields for symlink
*
*/
public Builder clearSymlink() {
bitField0_ = (bitField0_ & ~0x00000100);
symlink_ = getDefaultInstance().getSymlink();
onChanged();
return this;
}
// optional uint32 block_replication = 10 [default = 0];
private int blockReplication_ ;
/**
* optional uint32 block_replication = 10 [default = 0];
*
*
* Optional fields for file
*
*/
public boolean hasBlockReplication() {
return ((bitField0_ & 0x00000200) == 0x00000200);
}
/**
* optional uint32 block_replication = 10 [default = 0];
*
*
* Optional fields for file
*
*/
public int getBlockReplication() {
return blockReplication_;
}
/**
* optional uint32 block_replication = 10 [default = 0];
*
*
* Optional fields for file
*
*/
public Builder setBlockReplication(int value) {
bitField0_ |= 0x00000200;
blockReplication_ = value;
onChanged();
return this;
}
/**
* optional uint32 block_replication = 10 [default = 0];
*
*
* Optional fields for file
*
*/
public Builder clearBlockReplication() {
bitField0_ = (bitField0_ & ~0x00000200);
blockReplication_ = 0;
onChanged();
return this;
}
// optional uint64 blocksize = 11 [default = 0];
private long blocksize_ ;
/**
* optional uint64 blocksize = 11 [default = 0];
*/
public boolean hasBlocksize() {
return ((bitField0_ & 0x00000400) == 0x00000400);
}
/**
* optional uint64 blocksize = 11 [default = 0];
*/
public long getBlocksize() {
return blocksize_;
}
/**
* optional uint64 blocksize = 11 [default = 0];
*/
public Builder setBlocksize(long value) {
bitField0_ |= 0x00000400;
blocksize_ = value;
onChanged();
return this;
}
/**
* optional uint64 blocksize = 11 [default = 0];
*/
public Builder clearBlocksize() {
bitField0_ = (bitField0_ & ~0x00000400);
blocksize_ = 0L;
onChanged();
return this;
}
// optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder> locationsBuilder_;
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* supplied only if asked by client
*
*/
public boolean hasLocations() {
return ((bitField0_ & 0x00000800) == 0x00000800);
}
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* supplied only if asked by client
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations() {
if (locationsBuilder_ == null) {
return locations_;
} else {
return locationsBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* supplied only if asked by client
*
*/
public Builder setLocations(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto value) {
if (locationsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
locations_ = value;
onChanged();
} else {
locationsBuilder_.setMessage(value);
}
bitField0_ |= 0x00000800;
return this;
}
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* supplied only if asked by client
*
*/
public Builder setLocations(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder builderForValue) {
if (locationsBuilder_ == null) {
locations_ = builderForValue.build();
onChanged();
} else {
locationsBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000800;
return this;
}
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* supplied only if asked by client
*
*/
public Builder mergeLocations(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto value) {
if (locationsBuilder_ == null) {
if (((bitField0_ & 0x00000800) == 0x00000800) &&
locations_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance()) {
locations_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.newBuilder(locations_).mergeFrom(value).buildPartial();
} else {
locations_ = value;
}
onChanged();
} else {
locationsBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000800;
return this;
}
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* supplied only if asked by client
*
*/
public Builder clearLocations() {
if (locationsBuilder_ == null) {
locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance();
onChanged();
} else {
locationsBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000800);
return this;
}
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* supplied only if asked by client
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder getLocationsBuilder() {
bitField0_ |= 0x00000800;
onChanged();
return getLocationsFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* supplied only if asked by client
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder() {
if (locationsBuilder_ != null) {
return locationsBuilder_.getMessageOrBuilder();
} else {
return locations_;
}
}
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* supplied only if asked by client
*
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder>
getLocationsFieldBuilder() {
if (locationsBuilder_ == null) {
locationsBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder>(
locations_,
getParentForChildren(),
isClean());
locations_ = null;
}
return locationsBuilder_;
}
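// The locations field above follows the lazy single-field-builder pattern this
// generated builder uses for every singular message field: until
// getLocationsFieldBuilder() is first called, locationsBuilder_ stays null and
// the field is held as a plain locations_ message; the first call wraps the
// current value in a SingleFieldBuilder, nulls out locations_, and from then on
// every read and write is routed through the nested builder, whose edits mark
// this builder as changed through its parent callback.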
// optional uint64 fileId = 13 [default = 0];
private long fileId_ ;
/**
* optional uint64 fileId = 13 [default = 0];
*
*
* Optional field for fileId
*
*/
public boolean hasFileId() {
return ((bitField0_ & 0x00001000) == 0x00001000);
}
/**
* optional uint64 fileId = 13 [default = 0];
*
*
* Optional field for fileId
*
*/
public long getFileId() {
return fileId_;
}
/**
* optional uint64 fileId = 13 [default = 0];
*
*
* Optional field for fileId
*
*/
public Builder setFileId(long value) {
bitField0_ |= 0x00001000;
fileId_ = value;
onChanged();
return this;
}
/**
* optional uint64 fileId = 13 [default = 0];
*
*
* Optional field for fileId
*
*/
public Builder clearFileId() {
bitField0_ = (bitField0_ & ~0x00001000);
fileId_ = 0L;
onChanged();
return this;
}
// optional int32 childrenNum = 14 [default = -1];
private int childrenNum_ = -1;
/**
* optional int32 childrenNum = 14 [default = -1];
*/
public boolean hasChildrenNum() {
return ((bitField0_ & 0x00002000) == 0x00002000);
}
/**
* optional int32 childrenNum = 14 [default = -1];
*/
public int getChildrenNum() {
return childrenNum_;
}
/**
* optional int32 childrenNum = 14 [default = -1];
*/
public Builder setChildrenNum(int value) {
bitField0_ |= 0x00002000;
childrenNum_ = value;
onChanged();
return this;
}
/**
* optional int32 childrenNum = 14 [default = -1];
*/
public Builder clearChildrenNum() {
bitField0_ = (bitField0_ & ~0x00002000);
childrenNum_ = -1;
onChanged();
return this;
}
// optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto fileEncryptionInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder> fileEncryptionInfoBuilder_;
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
*
*
* Optional field for file encryption
*
*/
public boolean hasFileEncryptionInfo() {
return ((bitField0_ & 0x00004000) == 0x00004000);
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
*
*
* Optional field for file encryption
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo() {
if (fileEncryptionInfoBuilder_ == null) {
return fileEncryptionInfo_;
} else {
return fileEncryptionInfoBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
*
*
* Optional field for file encryption
*
*/
public Builder setFileEncryptionInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto value) {
if (fileEncryptionInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
fileEncryptionInfo_ = value;
onChanged();
} else {
fileEncryptionInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00004000;
return this;
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
*
*
* Optional field for file encryption
*
*/
public Builder setFileEncryptionInfo(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder builderForValue) {
if (fileEncryptionInfoBuilder_ == null) {
fileEncryptionInfo_ = builderForValue.build();
onChanged();
} else {
fileEncryptionInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00004000;
return this;
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
*
*
* Optional field for file encryption
*
*/
public Builder mergeFileEncryptionInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto value) {
if (fileEncryptionInfoBuilder_ == null) {
if (((bitField0_ & 0x00004000) == 0x00004000) &&
fileEncryptionInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance()) {
fileEncryptionInfo_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.newBuilder(fileEncryptionInfo_).mergeFrom(value).buildPartial();
} else {
fileEncryptionInfo_ = value;
}
onChanged();
} else {
fileEncryptionInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00004000;
return this;
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
*
*
* Optional field for file encryption
*
*/
public Builder clearFileEncryptionInfo() {
if (fileEncryptionInfoBuilder_ == null) {
fileEncryptionInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance();
onChanged();
} else {
fileEncryptionInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00004000);
return this;
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
*
*
* Optional field for file encryption
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder getFileEncryptionInfoBuilder() {
bitField0_ |= 0x00004000;
onChanged();
return getFileEncryptionInfoFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
*
*
* Optional field for file encryption
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder() {
if (fileEncryptionInfoBuilder_ != null) {
return fileEncryptionInfoBuilder_.getMessageOrBuilder();
} else {
return fileEncryptionInfo_;
}
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
*
*
* Optional field for file encryption
*
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder>
getFileEncryptionInfoFieldBuilder() {
if (fileEncryptionInfoBuilder_ == null) {
fileEncryptionInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder>(
fileEncryptionInfo_,
getParentForChildren(),
isClean());
fileEncryptionInfo_ = null;
}
return fileEncryptionInfoBuilder_;
}
// optional uint32 storagePolicy = 16 [default = 0];
private int storagePolicy_ ;
/**
* optional uint32 storagePolicy = 16 [default = 0];
*
*
* block storage policy id
*
*/
public boolean hasStoragePolicy() {
return ((bitField0_ & 0x00008000) == 0x00008000);
}
/**
* optional uint32 storagePolicy = 16 [default = 0];
*
*
* block storage policy id
*
*/
public int getStoragePolicy() {
return storagePolicy_;
}
/**
* optional uint32 storagePolicy = 16 [default = 0];
*
*
* block storage policy id
*
*/
public Builder setStoragePolicy(int value) {
bitField0_ |= 0x00008000;
storagePolicy_ = value;
onChanged();
return this;
}
/**
* optional uint32 storagePolicy = 16 [default = 0];
*
*
* block storage policy id
*
*/
public Builder clearStoragePolicy() {
bitField0_ = (bitField0_ & ~0x00008000);
storagePolicy_ = 0;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.HdfsFileStatusProto)
}
static {
defaultInstance = new HdfsFileStatusProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.HdfsFileStatusProto)
}
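// Illustrative usage sketch, not part of the generated file: populating an
// HdfsFileStatusProto through the builder above. Field values are made-up
// examples; build() additionally validates the required fields declared
// earlier in this message (fields 1-6: type, path, length, permission, owner,
// group), so buildPartial() is used here to keep the fragment self-contained.
//
//   HdfsProtos.HdfsFileStatusProto status =
//       HdfsProtos.HdfsFileStatusProto.newBuilder()
//           .setModificationTime(1700000000000L) // required, ms since epoch
//           .setAccessTime(1700000000000L)       // required, ms since epoch
//           .setBlockReplication(3)              // optional, files only
//           .setBlocksize(134217728L)            // optional, 128 MiB
//           .setFileId(16386L)                   // optional inode id
//           .buildPartial();                     // skips required-field check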
public interface FsServerDefaultsProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required uint64 blockSize = 1;
/**
* required uint64 blockSize = 1;
*/
boolean hasBlockSize();
/**
* required uint64 blockSize = 1;
*/
long getBlockSize();
// required uint32 bytesPerChecksum = 2;
/**
* required uint32 bytesPerChecksum = 2;
*/
boolean hasBytesPerChecksum();
/**
* required uint32 bytesPerChecksum = 2;
*/
int getBytesPerChecksum();
// required uint32 writePacketSize = 3;
/**
* required uint32 writePacketSize = 3;
*/
boolean hasWritePacketSize();
/**
* required uint32 writePacketSize = 3;
*/
int getWritePacketSize();
// required uint32 replication = 4;
/**
* required uint32 replication = 4;
*
*
* Actually a short - only 16 bits used
*
*/
boolean hasReplication();
/**
* required uint32 replication = 4;
*
*
* Actually a short - only 16 bits used
*
*/
int getReplication();
// required uint32 fileBufferSize = 5;
/**
* required uint32 fileBufferSize = 5;
*/
boolean hasFileBufferSize();
/**
* required uint32 fileBufferSize = 5;
*/
int getFileBufferSize();
// optional bool encryptDataTransfer = 6 [default = false];
/**
* optional bool encryptDataTransfer = 6 [default = false];
*/
boolean hasEncryptDataTransfer();
/**
* optional bool encryptDataTransfer = 6 [default = false];
*/
boolean getEncryptDataTransfer();
// optional uint64 trashInterval = 7 [default = 0];
/**
* optional uint64 trashInterval = 7 [default = 0];
*/
boolean hasTrashInterval();
/**
* optional uint64 trashInterval = 7 [default = 0];
*/
long getTrashInterval();
// optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
/**
* optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
*/
boolean hasChecksumType();
/**
* optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getChecksumType();
}
/**
* Protobuf type {@code hadoop.hdfs.FsServerDefaultsProto}
*
*
**
* HDFS Server Defaults
*
*/
public static final class FsServerDefaultsProto extends
com.google.protobuf.GeneratedMessage
implements FsServerDefaultsProtoOrBuilder {
// Use FsServerDefaultsProto.newBuilder() to construct.
private FsServerDefaultsProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private FsServerDefaultsProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final FsServerDefaultsProto defaultInstance;
public static FsServerDefaultsProto getDefaultInstance() {
return defaultInstance;
}
public FsServerDefaultsProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private FsServerDefaultsProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
blockSize_ = input.readUInt64();
break;
}
case 16: {
bitField0_ |= 0x00000002;
bytesPerChecksum_ = input.readUInt32();
break;
}
case 24: {
bitField0_ |= 0x00000004;
writePacketSize_ = input.readUInt32();
break;
}
case 32: {
bitField0_ |= 0x00000008;
replication_ = input.readUInt32();
break;
}
case 40: {
bitField0_ |= 0x00000010;
fileBufferSize_ = input.readUInt32();
break;
}
case 48: {
bitField0_ |= 0x00000020;
encryptDataTransfer_ = input.readBool();
break;
}
case 56: {
bitField0_ |= 0x00000040;
trashInterval_ = input.readUInt64();
break;
}
case 64: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(8, rawValue);
} else {
bitField0_ |= 0x00000080;
checksumType_ = value;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
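// Each case label in the constructor above is a raw protobuf wire tag,
// computed as (field_number << 3) | wire_type. All eight fields of this
// message are varints on the wire (wire type 0), so field 1 (blockSize)
// arrives as tag 8, field 2 (bytesPerChecksum) as tag 16, and so on up to
// field 8 (checksumType) as tag 64. Note that an unrecognized checksumType
// enum number is preserved in unknownFields instead of being dropped.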
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.Builder.class);
}
public static com.google.protobuf.Parser<FsServerDefaultsProto> PARSER =
new com.google.protobuf.AbstractParser<FsServerDefaultsProto>() {
public FsServerDefaultsProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new FsServerDefaultsProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<FsServerDefaultsProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required uint64 blockSize = 1;
public static final int BLOCKSIZE_FIELD_NUMBER = 1;
private long blockSize_;
/**
* required uint64 blockSize = 1;
*/
public boolean hasBlockSize() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint64 blockSize = 1;
*/
public long getBlockSize() {
return blockSize_;
}
// required uint32 bytesPerChecksum = 2;
public static final int BYTESPERCHECKSUM_FIELD_NUMBER = 2;
private int bytesPerChecksum_;
/**
* required uint32 bytesPerChecksum = 2;
*/
public boolean hasBytesPerChecksum() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint32 bytesPerChecksum = 2;
*/
public int getBytesPerChecksum() {
return bytesPerChecksum_;
}
// required uint32 writePacketSize = 3;
public static final int WRITEPACKETSIZE_FIELD_NUMBER = 3;
private int writePacketSize_;
/**
* required uint32 writePacketSize = 3;
*/
public boolean hasWritePacketSize() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint32 writePacketSize = 3;
*/
public int getWritePacketSize() {
return writePacketSize_;
}
// required uint32 replication = 4;
public static final int REPLICATION_FIELD_NUMBER = 4;
private int replication_;
/**
* required uint32 replication = 4;
*
*
* Actually a short - only 16 bits used
*
*/
public boolean hasReplication() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required uint32 replication = 4;
*
*
* Actually a short - only 16 bits used
*
*/
public int getReplication() {
return replication_;
}
// required uint32 fileBufferSize = 5;
public static final int FILEBUFFERSIZE_FIELD_NUMBER = 5;
private int fileBufferSize_;
/**
* required uint32 fileBufferSize = 5;
*/
public boolean hasFileBufferSize() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required uint32 fileBufferSize = 5;
*/
public int getFileBufferSize() {
return fileBufferSize_;
}
// optional bool encryptDataTransfer = 6 [default = false];
public static final int ENCRYPTDATATRANSFER_FIELD_NUMBER = 6;
private boolean encryptDataTransfer_;
/**
* optional bool encryptDataTransfer = 6 [default = false];
*/
public boolean hasEncryptDataTransfer() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional bool encryptDataTransfer = 6 [default = false];
*/
public boolean getEncryptDataTransfer() {
return encryptDataTransfer_;
}
// optional uint64 trashInterval = 7 [default = 0];
public static final int TRASHINTERVAL_FIELD_NUMBER = 7;
private long trashInterval_;
/**
* optional uint64 trashInterval = 7 [default = 0];
*/
public boolean hasTrashInterval() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional uint64 trashInterval = 7 [default = 0];
*/
public long getTrashInterval() {
return trashInterval_;
}
// optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
public static final int CHECKSUMTYPE_FIELD_NUMBER = 8;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto checksumType_;
/**
* optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
*/
public boolean hasChecksumType() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getChecksumType() {
return checksumType_;
}
private void initFields() {
blockSize_ = 0L;
bytesPerChecksum_ = 0;
writePacketSize_ = 0;
replication_ = 0;
fileBufferSize_ = 0;
encryptDataTransfer_ = false;
trashInterval_ = 0L;
checksumType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasBlockSize()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasBytesPerChecksum()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasWritePacketSize()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasReplication()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasFileBufferSize()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt64(1, blockSize_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt32(2, bytesPerChecksum_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt32(3, writePacketSize_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt32(4, replication_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeUInt32(5, fileBufferSize_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeBool(6, encryptDataTransfer_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeUInt64(7, trashInterval_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
output.writeEnum(8, checksumType_.getNumber());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(1, blockSize_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(2, bytesPerChecksum_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(3, writePacketSize_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(4, replication_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(5, fileBufferSize_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(6, encryptDataTransfer_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(7, trashInterval_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(8, checksumType_.getNumber());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto) obj;
boolean result = true;
result = result && (hasBlockSize() == other.hasBlockSize());
if (hasBlockSize()) {
result = result && (getBlockSize()
== other.getBlockSize());
}
result = result && (hasBytesPerChecksum() == other.hasBytesPerChecksum());
if (hasBytesPerChecksum()) {
result = result && (getBytesPerChecksum()
== other.getBytesPerChecksum());
}
result = result && (hasWritePacketSize() == other.hasWritePacketSize());
if (hasWritePacketSize()) {
result = result && (getWritePacketSize()
== other.getWritePacketSize());
}
result = result && (hasReplication() == other.hasReplication());
if (hasReplication()) {
result = result && (getReplication()
== other.getReplication());
}
result = result && (hasFileBufferSize() == other.hasFileBufferSize());
if (hasFileBufferSize()) {
result = result && (getFileBufferSize()
== other.getFileBufferSize());
}
result = result && (hasEncryptDataTransfer() == other.hasEncryptDataTransfer());
if (hasEncryptDataTransfer()) {
result = result && (getEncryptDataTransfer()
== other.getEncryptDataTransfer());
}
result = result && (hasTrashInterval() == other.hasTrashInterval());
if (hasTrashInterval()) {
result = result && (getTrashInterval()
== other.getTrashInterval());
}
result = result && (hasChecksumType() == other.hasChecksumType());
if (hasChecksumType()) {
result = result &&
(getChecksumType() == other.getChecksumType());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasBlockSize()) {
hash = (37 * hash) + BLOCKSIZE_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getBlockSize());
}
if (hasBytesPerChecksum()) {
hash = (37 * hash) + BYTESPERCHECKSUM_FIELD_NUMBER;
hash = (53 * hash) + getBytesPerChecksum();
}
if (hasWritePacketSize()) {
hash = (37 * hash) + WRITEPACKETSIZE_FIELD_NUMBER;
hash = (53 * hash) + getWritePacketSize();
}
if (hasReplication()) {
hash = (37 * hash) + REPLICATION_FIELD_NUMBER;
hash = (53 * hash) + getReplication();
}
if (hasFileBufferSize()) {
hash = (37 * hash) + FILEBUFFERSIZE_FIELD_NUMBER;
hash = (53 * hash) + getFileBufferSize();
}
if (hasEncryptDataTransfer()) {
hash = (37 * hash) + ENCRYPTDATATRANSFER_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getEncryptDataTransfer());
}
if (hasTrashInterval()) {
hash = (37 * hash) + TRASHINTERVAL_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getTrashInterval());
}
if (hasChecksumType()) {
hash = (37 * hash) + CHECKSUMTYPE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getChecksumType());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.FsServerDefaultsProto}
*
*
**
* HDFS Server Defaults
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
blockSize_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
bytesPerChecksum_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
writePacketSize_ = 0;
bitField0_ = (bitField0_ & ~0x00000004);
replication_ = 0;
bitField0_ = (bitField0_ & ~0x00000008);
fileBufferSize_ = 0;
bitField0_ = (bitField0_ & ~0x00000010);
encryptDataTransfer_ = false;
bitField0_ = (bitField0_ & ~0x00000020);
trashInterval_ = 0L;
bitField0_ = (bitField0_ & ~0x00000040);
checksumType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32;
bitField0_ = (bitField0_ & ~0x00000080);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.blockSize_ = blockSize_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.bytesPerChecksum_ = bytesPerChecksum_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.writePacketSize_ = writePacketSize_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.replication_ = replication_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.fileBufferSize_ = fileBufferSize_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000020;
}
result.encryptDataTransfer_ = encryptDataTransfer_;
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000040;
}
result.trashInterval_ = trashInterval_;
if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
to_bitField0_ |= 0x00000080;
}
result.checksumType_ = checksumType_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.getDefaultInstance()) return this;
if (other.hasBlockSize()) {
setBlockSize(other.getBlockSize());
}
if (other.hasBytesPerChecksum()) {
setBytesPerChecksum(other.getBytesPerChecksum());
}
if (other.hasWritePacketSize()) {
setWritePacketSize(other.getWritePacketSize());
}
if (other.hasReplication()) {
setReplication(other.getReplication());
}
if (other.hasFileBufferSize()) {
setFileBufferSize(other.getFileBufferSize());
}
if (other.hasEncryptDataTransfer()) {
setEncryptDataTransfer(other.getEncryptDataTransfer());
}
if (other.hasTrashInterval()) {
setTrashInterval(other.getTrashInterval());
}
if (other.hasChecksumType()) {
setChecksumType(other.getChecksumType());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasBlockSize()) {
return false;
}
if (!hasBytesPerChecksum()) {
return false;
}
if (!hasWritePacketSize()) {
return false;
}
if (!hasReplication()) {
return false;
}
if (!hasFileBufferSize()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required uint64 blockSize = 1;
private long blockSize_ ;
/**
* required uint64 blockSize = 1;
*/
public boolean hasBlockSize() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint64 blockSize = 1;
*/
public long getBlockSize() {
return blockSize_;
}
/**
* required uint64 blockSize = 1;
*/
public Builder setBlockSize(long value) {
bitField0_ |= 0x00000001;
blockSize_ = value;
onChanged();
return this;
}
/**
* required uint64 blockSize = 1;
*/
public Builder clearBlockSize() {
bitField0_ = (bitField0_ & ~0x00000001);
blockSize_ = 0L;
onChanged();
return this;
}
// required uint32 bytesPerChecksum = 2;
private int bytesPerChecksum_ ;
/**
* required uint32 bytesPerChecksum = 2;
*/
public boolean hasBytesPerChecksum() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint32 bytesPerChecksum = 2;
*/
public int getBytesPerChecksum() {
return bytesPerChecksum_;
}
/**
* required uint32 bytesPerChecksum = 2;
*/
public Builder setBytesPerChecksum(int value) {
bitField0_ |= 0x00000002;
bytesPerChecksum_ = value;
onChanged();
return this;
}
/**
* required uint32 bytesPerChecksum = 2;
*/
public Builder clearBytesPerChecksum() {
bitField0_ = (bitField0_ & ~0x00000002);
bytesPerChecksum_ = 0;
onChanged();
return this;
}
// required uint32 writePacketSize = 3;
private int writePacketSize_ ;
/**
* required uint32 writePacketSize = 3;
*/
public boolean hasWritePacketSize() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint32 writePacketSize = 3;
*/
public int getWritePacketSize() {
return writePacketSize_;
}
/**
* required uint32 writePacketSize = 3;
*/
public Builder setWritePacketSize(int value) {
bitField0_ |= 0x00000004;
writePacketSize_ = value;
onChanged();
return this;
}
/**
* required uint32 writePacketSize = 3;
*/
public Builder clearWritePacketSize() {
bitField0_ = (bitField0_ & ~0x00000004);
writePacketSize_ = 0;
onChanged();
return this;
}
// required uint32 replication = 4;
private int replication_ ;
/**
* required uint32 replication = 4;
*
*
* Actually a short - only 16 bits used
*
*/
public boolean hasReplication() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required uint32 replication = 4;
*
*
* Actually a short - only 16 bits used
*
*/
public int getReplication() {
return replication_;
}
/**
* required uint32 replication = 4;
*
*
* Actually a short - only 16 bits used
*
*/
public Builder setReplication(int value) {
bitField0_ |= 0x00000008;
replication_ = value;
onChanged();
return this;
}
/**
* required uint32 replication = 4;
*
*
* Actually a short - only 16 bits used
*
*/
public Builder clearReplication() {
bitField0_ = (bitField0_ & ~0x00000008);
replication_ = 0;
onChanged();
return this;
}
// required uint32 fileBufferSize = 5;
private int fileBufferSize_ ;
/**
* required uint32 fileBufferSize = 5;
*/
public boolean hasFileBufferSize() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required uint32 fileBufferSize = 5;
*/
public int getFileBufferSize() {
return fileBufferSize_;
}
/**
* required uint32 fileBufferSize = 5;
*/
public Builder setFileBufferSize(int value) {
bitField0_ |= 0x00000010;
fileBufferSize_ = value;
onChanged();
return this;
}
/**
* required uint32 fileBufferSize = 5;
*/
public Builder clearFileBufferSize() {
bitField0_ = (bitField0_ & ~0x00000010);
fileBufferSize_ = 0;
onChanged();
return this;
}
// optional bool encryptDataTransfer = 6 [default = false];
private boolean encryptDataTransfer_ ;
/**
* optional bool encryptDataTransfer = 6 [default = false];
*/
public boolean hasEncryptDataTransfer() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional bool encryptDataTransfer = 6 [default = false];
*/
public boolean getEncryptDataTransfer() {
return encryptDataTransfer_;
}
/**
* optional bool encryptDataTransfer = 6 [default = false];
*/
public Builder setEncryptDataTransfer(boolean value) {
bitField0_ |= 0x00000020;
encryptDataTransfer_ = value;
onChanged();
return this;
}
/**
* optional bool encryptDataTransfer = 6 [default = false];
*/
public Builder clearEncryptDataTransfer() {
bitField0_ = (bitField0_ & ~0x00000020);
encryptDataTransfer_ = false;
onChanged();
return this;
}
// optional uint64 trashInterval = 7 [default = 0];
private long trashInterval_ ;
/**
* optional uint64 trashInterval = 7 [default = 0];
*/
public boolean hasTrashInterval() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional uint64 trashInterval = 7 [default = 0];
*/
public long getTrashInterval() {
return trashInterval_;
}
/**
* optional uint64 trashInterval = 7 [default = 0];
*/
public Builder setTrashInterval(long value) {
bitField0_ |= 0x00000040;
trashInterval_ = value;
onChanged();
return this;
}
/**
* optional uint64 trashInterval = 7 [default = 0];
*/
public Builder clearTrashInterval() {
bitField0_ = (bitField0_ & ~0x00000040);
trashInterval_ = 0L;
onChanged();
return this;
}
// optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto checksumType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32;
/**
* optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
*/
public boolean hasChecksumType() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getChecksumType() {
return checksumType_;
}
/**
* optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
*/
public Builder setChecksumType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000080;
checksumType_ = value;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
*/
public Builder clearChecksumType() {
bitField0_ = (bitField0_ & ~0x00000080);
checksumType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.FsServerDefaultsProto)
}
static {
defaultInstance = new FsServerDefaultsProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.FsServerDefaultsProto)
}
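// Illustrative round-trip sketch, not part of the generated file: all five
// required fields must be set or build() throws, while checksumType defaults
// to CHECKSUM_CRC32 when unset. Values are made-up examples.
//
//   HdfsProtos.FsServerDefaultsProto defaults =
//       HdfsProtos.FsServerDefaultsProto.newBuilder()
//           .setBlockSize(134217728L)  // required
//           .setBytesPerChecksum(512)  // required
//           .setWritePacketSize(65536) // required
//           .setReplication(3)         // required
//           .setFileBufferSize(4096)   // required
//           .build();
//   byte[] wire = defaults.toByteArray();
//   HdfsProtos.FsServerDefaultsProto parsed =
//       HdfsProtos.FsServerDefaultsProto.parseFrom(wire);
//   assert parsed.equals(defaults);    // field-by-field equals defined above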
public interface DirectoryListingProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto>
getPartialListingList();
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index);
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
int getPartialListingCount();
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>
getPartialListingOrBuilderList();
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder(
int index);
// required uint32 remainingEntries = 2;
/**
* required uint32 remainingEntries = 2;
*/
boolean hasRemainingEntries();
/**
* required uint32 remainingEntries = 2;
*/
int getRemainingEntries();
}
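// A DirectoryListingProto represents one page of a directory listing:
// partialListing carries the entries in this batch and remainingEntries says
// how many entries the server still holds. A client would typically repeat
// the listing call, passing the last returned name as the continuation
// cursor, until remainingEntries reaches 0.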
/**
* Protobuf type {@code hadoop.hdfs.DirectoryListingProto}
*
*
**
* Directory listing
*
*/
public static final class DirectoryListingProto extends
com.google.protobuf.GeneratedMessage
implements DirectoryListingProtoOrBuilder {
// Use DirectoryListingProto.newBuilder() to construct.
private DirectoryListingProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private DirectoryListingProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final DirectoryListingProto defaultInstance;
public static DirectoryListingProto getDefaultInstance() {
return defaultInstance;
}
public DirectoryListingProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DirectoryListingProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
partialListing_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto>();
mutable_bitField0_ |= 0x00000001;
}
partialListing_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.PARSER, extensionRegistry));
break;
}
case 16: {
bitField0_ |= 0x00000001;
remainingEntries_ = input.readUInt32();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
partialListing_ = java.util.Collections.unmodifiableList(partialListing_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DirectoryListingProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DirectoryListingProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.Builder.class);
}
public static com.google.protobuf.Parser<DirectoryListingProto> PARSER =
new com.google.protobuf.AbstractParser<DirectoryListingProto>() {
public DirectoryListingProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new DirectoryListingProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<DirectoryListingProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
public static final int PARTIALLISTING_FIELD_NUMBER = 1;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto> partialListing_;
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto> getPartialListingList() {
return partialListing_;
}
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>
getPartialListingOrBuilderList() {
return partialListing_;
}
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
public int getPartialListingCount() {
return partialListing_.size();
}
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index) {
return partialListing_.get(index);
}
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder(
int index) {
return partialListing_.get(index);
}
// required uint32 remainingEntries = 2;
public static final int REMAININGENTRIES_FIELD_NUMBER = 2;
private int remainingEntries_;
/**
* required uint32 remainingEntries = 2;
*/
public boolean hasRemainingEntries() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint32 remainingEntries = 2;
*/
public int getRemainingEntries() {
return remainingEntries_;
}
private void initFields() {
partialListing_ = java.util.Collections.emptyList();
remainingEntries_ = 0;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasRemainingEntries()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getPartialListingCount(); i++) {
if (!getPartialListing(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < partialListing_.size(); i++) {
output.writeMessage(1, partialListing_.get(i));
}
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt32(2, remainingEntries_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
for (int i = 0; i < partialListing_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, partialListing_.get(i));
}
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(2, remainingEntries_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto) obj;
boolean result = true;
result = result && getPartialListingList()
.equals(other.getPartialListingList());
result = result && (hasRemainingEntries() == other.hasRemainingEntries());
if (hasRemainingEntries()) {
result = result && (getRemainingEntries()
== other.getRemainingEntries());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (getPartialListingCount() > 0) {
hash = (37 * hash) + PARTIALLISTING_FIELD_NUMBER;
hash = (53 * hash) + getPartialListingList().hashCode();
}
if (hasRemainingEntries()) {
hash = (37 * hash) + REMAININGENTRIES_FIELD_NUMBER;
hash = (53 * hash) + getRemainingEntries();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.DirectoryListingProto}
*
*
**
* Directory listing
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DirectoryListingProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DirectoryListingProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getPartialListingFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (partialListingBuilder_ == null) {
partialListing_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
} else {
partialListingBuilder_.clear();
}
remainingEntries_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DirectoryListingProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (partialListingBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001)) {
partialListing_ = java.util.Collections.unmodifiableList(partialListing_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.partialListing_ = partialListing_;
} else {
result.partialListing_ = partialListingBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000001;
}
result.remainingEntries_ = remainingEntries_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.getDefaultInstance()) return this;
if (partialListingBuilder_ == null) {
if (!other.partialListing_.isEmpty()) {
if (partialListing_.isEmpty()) {
partialListing_ = other.partialListing_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensurePartialListingIsMutable();
partialListing_.addAll(other.partialListing_);
}
onChanged();
}
} else {
if (!other.partialListing_.isEmpty()) {
if (partialListingBuilder_.isEmpty()) {
partialListingBuilder_.dispose();
partialListingBuilder_ = null;
partialListing_ = other.partialListing_;
bitField0_ = (bitField0_ & ~0x00000001);
partialListingBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getPartialListingFieldBuilder() : null;
} else {
partialListingBuilder_.addAllMessages(other.partialListing_);
}
}
}
if (other.hasRemainingEntries()) {
setRemainingEntries(other.getRemainingEntries());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasRemainingEntries()) {
return false;
}
for (int i = 0; i < getPartialListingCount(); i++) {
if (!getPartialListing(i).isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto> partialListing_ =
java.util.Collections.emptyList();
private void ensurePartialListingIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
partialListing_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto>(partialListing_);
bitField0_ |= 0x00000001;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> partialListingBuilder_;
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto> getPartialListingList() {
if (partialListingBuilder_ == null) {
return java.util.Collections.unmodifiableList(partialListing_);
} else {
return partialListingBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
public int getPartialListingCount() {
if (partialListingBuilder_ == null) {
return partialListing_.size();
} else {
return partialListingBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index) {
if (partialListingBuilder_ == null) {
return partialListing_.get(index);
} else {
return partialListingBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
public Builder setPartialListing(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) {
if (partialListingBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensurePartialListingIsMutable();
partialListing_.set(index, value);
onChanged();
} else {
partialListingBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
public Builder setPartialListing(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) {
if (partialListingBuilder_ == null) {
ensurePartialListingIsMutable();
partialListing_.set(index, builderForValue.build());
onChanged();
} else {
partialListingBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
public Builder addPartialListing(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) {
if (partialListingBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensurePartialListingIsMutable();
partialListing_.add(value);
onChanged();
} else {
partialListingBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
public Builder addPartialListing(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) {
if (partialListingBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensurePartialListingIsMutable();
partialListing_.add(index, value);
onChanged();
} else {
partialListingBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
public Builder addPartialListing(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) {
if (partialListingBuilder_ == null) {
ensurePartialListingIsMutable();
partialListing_.add(builderForValue.build());
onChanged();
} else {
partialListingBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
public Builder addPartialListing(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) {
if (partialListingBuilder_ == null) {
ensurePartialListingIsMutable();
partialListing_.add(index, builderForValue.build());
onChanged();
} else {
partialListingBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
public Builder addAllPartialListing(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto> values) {
if (partialListingBuilder_ == null) {
ensurePartialListingIsMutable();
super.addAll(values, partialListing_);
onChanged();
} else {
partialListingBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
public Builder clearPartialListing() {
if (partialListingBuilder_ == null) {
partialListing_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
} else {
partialListingBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
public Builder removePartialListing(int index) {
if (partialListingBuilder_ == null) {
ensurePartialListingIsMutable();
partialListing_.remove(index);
onChanged();
} else {
partialListingBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder getPartialListingBuilder(
int index) {
return getPartialListingFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder(
int index) {
if (partialListingBuilder_ == null) {
return partialListing_.get(index); } else {
return partialListingBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>
getPartialListingOrBuilderList() {
if (partialListingBuilder_ != null) {
return partialListingBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(partialListing_);
}
}
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder addPartialListingBuilder() {
return getPartialListingFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder addPartialListingBuilder(
int index) {
return getPartialListingFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder>
getPartialListingBuilderList() {
return getPartialListingFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>
getPartialListingFieldBuilder() {
if (partialListingBuilder_ == null) {
partialListingBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>(
partialListing_,
((bitField0_ & 0x00000001) == 0x00000001),
getParentForChildren(),
isClean());
partialListing_ = null;
}
return partialListingBuilder_;
}
// required uint32 remainingEntries = 2;
private int remainingEntries_ ;
/**
* required uint32 remainingEntries = 2;
*/
public boolean hasRemainingEntries() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint32 remainingEntries = 2;
*/
public int getRemainingEntries() {
return remainingEntries_;
}
/**
* required uint32 remainingEntries = 2;
*/
public Builder setRemainingEntries(int value) {
bitField0_ |= 0x00000002;
remainingEntries_ = value;
onChanged();
return this;
}
/**
* required uint32 remainingEntries = 2;
*/
public Builder clearRemainingEntries() {
bitField0_ = (bitField0_ & ~0x00000002);
remainingEntries_ = 0;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DirectoryListingProto)
}
static {
defaultInstance = new DirectoryListingProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DirectoryListingProto)
}
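// NOTE (editorial, not generated by protoc): a minimal usage sketch for the
// DirectoryListingProto message defined above. It assumes the caller already
// has a fully-initialized HdfsFileStatusProto, since that message carries its
// own required fields; the zero passed for remainingEntries is illustrative.
private static DirectoryListingProto exampleDirectoryListingRoundTrip(
    HdfsFileStatusProto entry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  // remainingEntries is a required uint32, so build() would throw an
  // UninitializedMessageException if it were left unset.
  DirectoryListingProto listing = DirectoryListingProto.newBuilder()
      .addPartialListing(entry)
      .setRemainingEntries(0)
      .build();
  // Round-trip through the wire format using the generated parseFrom.
  byte[] bytes = listing.toByteArray();
  return DirectoryListingProto.parseFrom(bytes);
}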
public interface SnapshottableDirectoryStatusProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;
/**
* required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;
*/
boolean hasDirStatus();
/**
* required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDirStatus();
/**
* required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getDirStatusOrBuilder();
// required uint32 snapshot_quota = 2;
/**
* required uint32 snapshot_quota = 2;
*
* <pre>
* Fields specific for snapshottable directory
* </pre>
*/
boolean hasSnapshotQuota();
/**
* required uint32 snapshot_quota = 2;
*
* <pre>
* Fields specific for snapshottable directory
* </pre>
*/
int getSnapshotQuota();
// required uint32 snapshot_number = 3;
/**
* required uint32 snapshot_number = 3;
*/
boolean hasSnapshotNumber();
/**
* required uint32 snapshot_number = 3;
*/
int getSnapshotNumber();
// required bytes parent_fullpath = 4;
/**
* required bytes parent_fullpath = 4;
*/
boolean hasParentFullpath();
/**
* required bytes parent_fullpath = 4;
*/
com.google.protobuf.ByteString getParentFullpath();
}
/**
* Protobuf type {@code hadoop.hdfs.SnapshottableDirectoryStatusProto}
*
* <pre>
**
* Status of a snapshottable directory: besides the normal directory status
* information, it also includes the snapshot quota, the number of snapshots,
* and the full path of the parent directory.
* </pre>
*/
public static final class SnapshottableDirectoryStatusProto extends
com.google.protobuf.GeneratedMessage
implements SnapshottableDirectoryStatusProtoOrBuilder {
// Use SnapshottableDirectoryStatusProto.newBuilder() to construct.
private SnapshottableDirectoryStatusProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private SnapshottableDirectoryStatusProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final SnapshottableDirectoryStatusProto defaultInstance;
public static SnapshottableDirectoryStatusProto getDefaultInstance() {
return defaultInstance;
}
public SnapshottableDirectoryStatusProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private SnapshottableDirectoryStatusProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = dirStatus_.toBuilder();
}
dirStatus_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(dirStatus_);
dirStatus_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 16: {
bitField0_ |= 0x00000002;
snapshotQuota_ = input.readUInt32();
break;
}
case 24: {
bitField0_ |= 0x00000004;
snapshotNumber_ = input.readUInt32();
break;
}
case 34: {
bitField0_ |= 0x00000008;
parentFullpath_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder.class);
}
public static com.google.protobuf.Parser<SnapshottableDirectoryStatusProto> PARSER =
new com.google.protobuf.AbstractParser<SnapshottableDirectoryStatusProto>() {
public SnapshottableDirectoryStatusProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new SnapshottableDirectoryStatusProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<SnapshottableDirectoryStatusProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;
public static final int DIRSTATUS_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto dirStatus_;
/**
* required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;
*/
public boolean hasDirStatus() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDirStatus() {
return dirStatus_;
}
/**
* required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getDirStatusOrBuilder() {
return dirStatus_;
}
// required uint32 snapshot_quota = 2;
public static final int SNAPSHOT_QUOTA_FIELD_NUMBER = 2;
private int snapshotQuota_;
/**
* required uint32 snapshot_quota = 2;
*
* <pre>
* Fields specific for snapshottable directory
* </pre>
*/
public boolean hasSnapshotQuota() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint32 snapshot_quota = 2;
*
* <pre>
* Fields specific for snapshottable directory
* </pre>
*/
public int getSnapshotQuota() {
return snapshotQuota_;
}
// required uint32 snapshot_number = 3;
public static final int SNAPSHOT_NUMBER_FIELD_NUMBER = 3;
private int snapshotNumber_;
/**
* required uint32 snapshot_number = 3;
*/
public boolean hasSnapshotNumber() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint32 snapshot_number = 3;
*/
public int getSnapshotNumber() {
return snapshotNumber_;
}
// required bytes parent_fullpath = 4;
public static final int PARENT_FULLPATH_FIELD_NUMBER = 4;
private com.google.protobuf.ByteString parentFullpath_;
/**
* required bytes parent_fullpath = 4;
*/
public boolean hasParentFullpath() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required bytes parent_fullpath = 4;
*/
public com.google.protobuf.ByteString getParentFullpath() {
return parentFullpath_;
}
private void initFields() {
dirStatus_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance();
snapshotQuota_ = 0;
snapshotNumber_ = 0;
parentFullpath_ = com.google.protobuf.ByteString.EMPTY;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasDirStatus()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasSnapshotQuota()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasSnapshotNumber()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasParentFullpath()) {
memoizedIsInitialized = 0;
return false;
}
if (!getDirStatus().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, dirStatus_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt32(2, snapshotQuota_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt32(3, snapshotNumber_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeBytes(4, parentFullpath_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, dirStatus_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(2, snapshotQuota_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(3, snapshotNumber_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(4, parentFullpath_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto) obj;
boolean result = true;
result = result && (hasDirStatus() == other.hasDirStatus());
if (hasDirStatus()) {
result = result && getDirStatus()
.equals(other.getDirStatus());
}
result = result && (hasSnapshotQuota() == other.hasSnapshotQuota());
if (hasSnapshotQuota()) {
result = result && (getSnapshotQuota()
== other.getSnapshotQuota());
}
result = result && (hasSnapshotNumber() == other.hasSnapshotNumber());
if (hasSnapshotNumber()) {
result = result && (getSnapshotNumber()
== other.getSnapshotNumber());
}
result = result && (hasParentFullpath() == other.hasParentFullpath());
if (hasParentFullpath()) {
result = result && getParentFullpath()
.equals(other.getParentFullpath());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasDirStatus()) {
hash = (37 * hash) + DIRSTATUS_FIELD_NUMBER;
hash = (53 * hash) + getDirStatus().hashCode();
}
if (hasSnapshotQuota()) {
hash = (37 * hash) + SNAPSHOT_QUOTA_FIELD_NUMBER;
hash = (53 * hash) + getSnapshotQuota();
}
if (hasSnapshotNumber()) {
hash = (37 * hash) + SNAPSHOT_NUMBER_FIELD_NUMBER;
hash = (53 * hash) + getSnapshotNumber();
}
if (hasParentFullpath()) {
hash = (37 * hash) + PARENT_FULLPATH_FIELD_NUMBER;
hash = (53 * hash) + getParentFullpath().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.SnapshottableDirectoryStatusProto}
*
* <pre>
**
* Status of a snapshottable directory: besides the normal directory status
* information, it also includes the snapshot quota, the number of snapshots,
* and the full path of the parent directory.
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getDirStatusFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (dirStatusBuilder_ == null) {
dirStatus_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance();
} else {
dirStatusBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
snapshotQuota_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
snapshotNumber_ = 0;
bitField0_ = (bitField0_ & ~0x00000004);
parentFullpath_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (dirStatusBuilder_ == null) {
result.dirStatus_ = dirStatus_;
} else {
result.dirStatus_ = dirStatusBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.snapshotQuota_ = snapshotQuota_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.snapshotNumber_ = snapshotNumber_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.parentFullpath_ = parentFullpath_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.getDefaultInstance()) return this;
if (other.hasDirStatus()) {
mergeDirStatus(other.getDirStatus());
}
if (other.hasSnapshotQuota()) {
setSnapshotQuota(other.getSnapshotQuota());
}
if (other.hasSnapshotNumber()) {
setSnapshotNumber(other.getSnapshotNumber());
}
if (other.hasParentFullpath()) {
setParentFullpath(other.getParentFullpath());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasDirStatus()) {
return false;
}
if (!hasSnapshotQuota()) {
return false;
}
if (!hasSnapshotNumber()) {
return false;
}
if (!hasParentFullpath()) {
return false;
}
if (!getDirStatus().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto dirStatus_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> dirStatusBuilder_;
/**
* required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;
*/
public boolean hasDirStatus() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDirStatus() {
if (dirStatusBuilder_ == null) {
return dirStatus_;
} else {
return dirStatusBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;
*/
public Builder setDirStatus(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) {
if (dirStatusBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
dirStatus_ = value;
onChanged();
} else {
dirStatusBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;
*/
public Builder setDirStatus(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) {
if (dirStatusBuilder_ == null) {
dirStatus_ = builderForValue.build();
onChanged();
} else {
dirStatusBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;
*/
public Builder mergeDirStatus(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) {
if (dirStatusBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
dirStatus_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()) {
dirStatus_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.newBuilder(dirStatus_).mergeFrom(value).buildPartial();
} else {
dirStatus_ = value;
}
onChanged();
} else {
dirStatusBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;
*/
public Builder clearDirStatus() {
if (dirStatusBuilder_ == null) {
dirStatus_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance();
onChanged();
} else {
dirStatusBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder getDirStatusBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getDirStatusFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getDirStatusOrBuilder() {
if (dirStatusBuilder_ != null) {
return dirStatusBuilder_.getMessageOrBuilder();
} else {
return dirStatus_;
}
}
/**
* required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>
getDirStatusFieldBuilder() {
if (dirStatusBuilder_ == null) {
dirStatusBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>(
dirStatus_,
getParentForChildren(),
isClean());
dirStatus_ = null;
}
return dirStatusBuilder_;
}
// required uint32 snapshot_quota = 2;
private int snapshotQuota_ ;
/**
* required uint32 snapshot_quota = 2;
*
* <pre>
* Fields specific for snapshottable directory
* </pre>
*/
public boolean hasSnapshotQuota() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint32 snapshot_quota = 2;
*
* <pre>
* Fields specific for snapshottable directory
* </pre>
*/
public int getSnapshotQuota() {
return snapshotQuota_;
}
/**
* required uint32 snapshot_quota = 2;
*
* <pre>
* Fields specific for snapshottable directory
* </pre>
*/
public Builder setSnapshotQuota(int value) {
bitField0_ |= 0x00000002;
snapshotQuota_ = value;
onChanged();
return this;
}
/**
* required uint32 snapshot_quota = 2;
*
* <pre>
* Fields specific for snapshottable directory
* </pre>
*/
public Builder clearSnapshotQuota() {
bitField0_ = (bitField0_ & ~0x00000002);
snapshotQuota_ = 0;
onChanged();
return this;
}
// required uint32 snapshot_number = 3;
private int snapshotNumber_ ;
/**
* required uint32 snapshot_number = 3;
*/
public boolean hasSnapshotNumber() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint32 snapshot_number = 3;
*/
public int getSnapshotNumber() {
return snapshotNumber_;
}
/**
* required uint32 snapshot_number = 3;
*/
public Builder setSnapshotNumber(int value) {
bitField0_ |= 0x00000004;
snapshotNumber_ = value;
onChanged();
return this;
}
/**
* required uint32 snapshot_number = 3;
*/
public Builder clearSnapshotNumber() {
bitField0_ = (bitField0_ & ~0x00000004);
snapshotNumber_ = 0;
onChanged();
return this;
}
// required bytes parent_fullpath = 4;
private com.google.protobuf.ByteString parentFullpath_ = com.google.protobuf.ByteString.EMPTY;
/**
* required bytes parent_fullpath = 4;
*/
public boolean hasParentFullpath() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required bytes parent_fullpath = 4;
*/
public com.google.protobuf.ByteString getParentFullpath() {
return parentFullpath_;
}
/**
* required bytes parent_fullpath = 4;
*/
public Builder setParentFullpath(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
parentFullpath_ = value;
onChanged();
return this;
}
/**
* required bytes parent_fullpath = 4;
*/
public Builder clearParentFullpath() {
bitField0_ = (bitField0_ & ~0x00000008);
parentFullpath_ = getDefaultInstance().getParentFullpath();
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshottableDirectoryStatusProto)
}
static {
defaultInstance = new SnapshottableDirectoryStatusProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshottableDirectoryStatusProto)
}
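// NOTE (editorial, not generated by protoc): a hedged sketch showing how the
// snapshot-specific fields of SnapshottableDirectoryStatusProto are read.
// Interpreting parent_fullpath as UTF-8 is an assumption made for this
// example; the field is declared as raw bytes.
private static String describeSnapshottableDir(
    SnapshottableDirectoryStatusProto status) {
  // All four fields are required, so a message that passed isInitialized()
  // needs no hasXxx() guards before the getters are called.
  String parent = status.getParentFullpath().toStringUtf8();
  return parent + ": " + status.getSnapshotNumber()
      + " of " + status.getSnapshotQuota() + " snapshots used";
}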
public interface SnapshottableDirectoryListingProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
/**
* repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto>
getSnapshottableDirListingList();
/**
* repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto getSnapshottableDirListing(int index);
/**
* repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
*/
int getSnapshottableDirListingCount();
/**
* repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder>
getSnapshottableDirListingOrBuilderList();
/**
* repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder getSnapshottableDirListingOrBuilder(
int index);
}
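// NOTE (editorial, not generated by protoc): an illustrative helper for the
// SnapshottableDirectoryListingProto message defined just below. It walks the
// repeated field via the generated list accessor; the UTF-8 decoding of
// parent_fullpath is again an assumption of the example.
private static java.util.List<String> listSnapshottableDirParents(
    SnapshottableDirectoryListingProto listing) {
  java.util.List<String> parents = new java.util.ArrayList<String>();
  for (SnapshottableDirectoryStatusProto dir
      : listing.getSnapshottableDirListingList()) {
    parents.add(dir.getParentFullpath().toStringUtf8());
  }
  return parents;
}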
/**
* Protobuf type {@code hadoop.hdfs.SnapshottableDirectoryListingProto}
*
* <pre>
**
* Snapshottable directory listing
* </pre>
*/
public static final class SnapshottableDirectoryListingProto extends
com.google.protobuf.GeneratedMessage
implements SnapshottableDirectoryListingProtoOrBuilder {
// Use SnapshottableDirectoryListingProto.newBuilder() to construct.
private SnapshottableDirectoryListingProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private SnapshottableDirectoryListingProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final SnapshottableDirectoryListingProto defaultInstance;
public static SnapshottableDirectoryListingProto getDefaultInstance() {
return defaultInstance;
}
public SnapshottableDirectoryListingProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private SnapshottableDirectoryListingProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
snapshottableDirListing_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto>();
mutable_bitField0_ |= 0x00000001;
}
snapshottableDirListing_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.PARSER, extensionRegistry));
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
snapshottableDirListing_ = java.util.Collections.unmodifiableList(snapshottableDirListing_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.Builder.class);
}
public static com.google.protobuf.Parser<SnapshottableDirectoryListingProto> PARSER =
new com.google.protobuf.AbstractParser<SnapshottableDirectoryListingProto>() {
public SnapshottableDirectoryListingProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new SnapshottableDirectoryListingProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<SnapshottableDirectoryListingProto> getParserForType() {
return PARSER;
}
// repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
public static final int SNAPSHOTTABLEDIRLISTING_FIELD_NUMBER = 1;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto> snapshottableDirListing_;
/**
* repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto> getSnapshottableDirListingList() {
return snapshottableDirListing_;
}
/**
* repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder>
getSnapshottableDirListingOrBuilderList() {
return snapshottableDirListing_;
}
/**
* repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
*/
public int getSnapshottableDirListingCount() {
return snapshottableDirListing_.size();
}
/**
* repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto getSnapshottableDirListing(int index) {
return snapshottableDirListing_.get(index);
}
/**
* repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder getSnapshottableDirListingOrBuilder(
int index) {
return snapshottableDirListing_.get(index);
}
private void initFields() {
snapshottableDirListing_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
for (int i = 0; i < getSnapshottableDirListingCount(); i++) {
if (!getSnapshottableDirListing(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < snapshottableDirListing_.size(); i++) {
output.writeMessage(1, snapshottableDirListing_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
for (int i = 0; i < snapshottableDirListing_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, snapshottableDirListing_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto) obj;
boolean result = true;
result = result && getSnapshottableDirListingList()
.equals(other.getSnapshottableDirListingList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (getSnapshottableDirListingCount() > 0) {
hash = (37 * hash) + SNAPSHOTTABLEDIRLISTING_FIELD_NUMBER;
hash = (53 * hash) + getSnapshottableDirListingList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.SnapshottableDirectoryListingProto}
*
* <pre>
**
* Snapshottable directory listing
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getSnapshottableDirListingFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (snapshottableDirListingBuilder_ == null) {
snapshottableDirListing_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
} else {
snapshottableDirListingBuilder_.clear();
}
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto(this);
int from_bitField0_ = bitField0_;
if (snapshottableDirListingBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001)) {
snapshottableDirListing_ = java.util.Collections.unmodifiableList(snapshottableDirListing_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.snapshottableDirListing_ = snapshottableDirListing_;
} else {
result.snapshottableDirListing_ = snapshottableDirListingBuilder_.build();
}
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.getDefaultInstance()) return this;
if (snapshottableDirListingBuilder_ == null) {
if (!other.snapshottableDirListing_.isEmpty()) {
if (snapshottableDirListing_.isEmpty()) {
snapshottableDirListing_ = other.snapshottableDirListing_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureSnapshottableDirListingIsMutable();
snapshottableDirListing_.addAll(other.snapshottableDirListing_);
}
onChanged();
}
} else {
if (!other.snapshottableDirListing_.isEmpty()) {
if (snapshottableDirListingBuilder_.isEmpty()) {
snapshottableDirListingBuilder_.dispose();
snapshottableDirListingBuilder_ = null;
snapshottableDirListing_ = other.snapshottableDirListing_;
bitField0_ = (bitField0_ & ~0x00000001);
snapshottableDirListingBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getSnapshottableDirListingFieldBuilder() : null;
} else {
snapshottableDirListingBuilder_.addAllMessages(other.snapshottableDirListing_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
for (int i = 0; i < getSnapshottableDirListingCount(); i++) {
if (!getSnapshottableDirListing(i).isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto> snapshottableDirListing_ =
java.util.Collections.emptyList();
private void ensureSnapshottableDirListingIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
snapshottableDirListing_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto>(snapshottableDirListing_);
bitField0_ |= 0x00000001;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder> snapshottableDirListingBuilder_;
/**
* repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto> getSnapshottableDirListingList() {
if (snapshottableDirListingBuilder_ == null) {
return java.util.Collections.unmodifiableList(snapshottableDirListing_);
} else {
return snapshottableDirListingBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
*/
public int getSnapshottableDirListingCount() {
if (snapshottableDirListingBuilder_ == null) {
return snapshottableDirListing_.size();
} else {
return snapshottableDirListingBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto getSnapshottableDirListing(int index) {
if (snapshottableDirListingBuilder_ == null) {
return snapshottableDirListing_.get(index);
} else {
return snapshottableDirListingBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
*/
public Builder setSnapshottableDirListing(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto value) {
if (snapshottableDirListingBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureSnapshottableDirListingIsMutable();
snapshottableDirListing_.set(index, value);
onChanged();
} else {
snapshottableDirListingBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
*/
public Builder setSnapshottableDirListing(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder builderForValue) {
if (snapshottableDirListingBuilder_ == null) {
ensureSnapshottableDirListingIsMutable();
snapshottableDirListing_.set(index, builderForValue.build());
onChanged();
} else {
snapshottableDirListingBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
*/
public Builder addSnapshottableDirListing(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto value) {
if (snapshottableDirListingBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureSnapshottableDirListingIsMutable();
snapshottableDirListing_.add(value);
onChanged();
} else {
snapshottableDirListingBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
*/
public Builder addSnapshottableDirListing(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto value) {
if (snapshottableDirListingBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureSnapshottableDirListingIsMutable();
snapshottableDirListing_.add(index, value);
onChanged();
} else {
snapshottableDirListingBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
*/
public Builder addSnapshottableDirListing(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder builderForValue) {
if (snapshottableDirListingBuilder_ == null) {
ensureSnapshottableDirListingIsMutable();
snapshottableDirListing_.add(builderForValue.build());
onChanged();
} else {
snapshottableDirListingBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
*/
public Builder addSnapshottableDirListing(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder builderForValue) {
if (snapshottableDirListingBuilder_ == null) {
ensureSnapshottableDirListingIsMutable();
snapshottableDirListing_.add(index, builderForValue.build());
onChanged();
} else {
snapshottableDirListingBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
*/
public Builder addAllSnapshottableDirListing(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto> values) {
if (snapshottableDirListingBuilder_ == null) {
ensureSnapshottableDirListingIsMutable();
super.addAll(values, snapshottableDirListing_);
onChanged();
} else {
snapshottableDirListingBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
*/
public Builder clearSnapshottableDirListing() {
if (snapshottableDirListingBuilder_ == null) {
snapshottableDirListing_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
} else {
snapshottableDirListingBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
*/
public Builder removeSnapshottableDirListing(int index) {
if (snapshottableDirListingBuilder_ == null) {
ensureSnapshottableDirListingIsMutable();
snapshottableDirListing_.remove(index);
onChanged();
} else {
snapshottableDirListingBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder getSnapshottableDirListingBuilder(
int index) {
return getSnapshottableDirListingFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder getSnapshottableDirListingOrBuilder(
int index) {
if (snapshottableDirListingBuilder_ == null) {
return snapshottableDirListing_.get(index); } else {
return snapshottableDirListingBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder>
getSnapshottableDirListingOrBuilderList() {
if (snapshottableDirListingBuilder_ != null) {
return snapshottableDirListingBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(snapshottableDirListing_);
}
}
/**
* repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder addSnapshottableDirListingBuilder() {
return getSnapshottableDirListingFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder addSnapshottableDirListingBuilder(
int index) {
return getSnapshottableDirListingFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder>
getSnapshottableDirListingBuilderList() {
return getSnapshottableDirListingFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder>
getSnapshottableDirListingFieldBuilder() {
if (snapshottableDirListingBuilder_ == null) {
snapshottableDirListingBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder>(
snapshottableDirListing_,
((bitField0_ & 0x00000001) == 0x00000001),
getParentForChildren(),
isClean());
snapshottableDirListing_ = null;
}
return snapshottableDirListingBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshottableDirectoryListingProto)
}
static {
defaultInstance = new SnapshottableDirectoryListingProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshottableDirectoryListingProto)
}
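// Usage sketch (editor's addition, not protoc output): one way to assemble a
// SnapshottableDirectoryListingProto through the generated builder API. The
// status argument is a hypothetical, fully populated
// SnapshottableDirectoryStatusProto supplied by the caller.
private static SnapshottableDirectoryListingProto exampleBuildListing(
    SnapshottableDirectoryStatusProto status) {
  return SnapshottableDirectoryListingProto.newBuilder()
      // repeated field: each call appends one entry to snapshottableDirListing
      .addSnapshottableDirListing(status)
      // build() throws UninitializedMessageException if any entry is missing
      // a required field (see isInitialized() above)
      .build();
}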
public interface SnapshotDiffReportEntryProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required bytes fullpath = 1;
/**
* required bytes fullpath = 1;
*/
boolean hasFullpath();
/**
* required bytes fullpath = 1;
*/
com.google.protobuf.ByteString getFullpath();
// required string modificationLabel = 2;
/**
* required string modificationLabel = 2;
*/
boolean hasModificationLabel();
/**
* required string modificationLabel = 2;
*/
java.lang.String getModificationLabel();
/**
* required string modificationLabel = 2;
*/
com.google.protobuf.ByteString
getModificationLabelBytes();
// optional bytes targetPath = 3;
/**
* optional bytes targetPath = 3;
*/
boolean hasTargetPath();
/**
* optional bytes targetPath = 3;
*/
com.google.protobuf.ByteString getTargetPath();
}
/**
* Protobuf type {@code hadoop.hdfs.SnapshotDiffReportEntryProto}
*
*
**
* Snapshot diff report entry
*
*/
public static final class SnapshotDiffReportEntryProto extends
com.google.protobuf.GeneratedMessage
implements SnapshotDiffReportEntryProtoOrBuilder {
// Use SnapshotDiffReportEntryProto.newBuilder() to construct.
private SnapshotDiffReportEntryProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private SnapshotDiffReportEntryProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final SnapshotDiffReportEntryProto defaultInstance;
public static SnapshotDiffReportEntryProto getDefaultInstance() {
return defaultInstance;
}
public SnapshotDiffReportEntryProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private SnapshotDiffReportEntryProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
fullpath_ = input.readBytes();
break;
}
case 18: {
bitField0_ |= 0x00000002;
modificationLabel_ = input.readBytes();
break;
}
case 26: {
bitField0_ |= 0x00000004;
targetPath_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder.class);
}
public static com.google.protobuf.Parser<SnapshotDiffReportEntryProto> PARSER =
new com.google.protobuf.AbstractParser<SnapshotDiffReportEntryProto>() {
public SnapshotDiffReportEntryProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new SnapshotDiffReportEntryProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<SnapshotDiffReportEntryProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required bytes fullpath = 1;
public static final int FULLPATH_FIELD_NUMBER = 1;
private com.google.protobuf.ByteString fullpath_;
/**
* required bytes fullpath = 1;
*/
public boolean hasFullpath() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required bytes fullpath = 1;
*/
public com.google.protobuf.ByteString getFullpath() {
return fullpath_;
}
// required string modificationLabel = 2;
public static final int MODIFICATIONLABEL_FIELD_NUMBER = 2;
private java.lang.Object modificationLabel_;
/**
* required string modificationLabel = 2;
*/
public boolean hasModificationLabel() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string modificationLabel = 2;
*/
public java.lang.String getModificationLabel() {
java.lang.Object ref = modificationLabel_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
modificationLabel_ = s;
}
return s;
}
}
/**
* required string modificationLabel = 2;
*/
public com.google.protobuf.ByteString
getModificationLabelBytes() {
java.lang.Object ref = modificationLabel_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
modificationLabel_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// optional bytes targetPath = 3;
public static final int TARGETPATH_FIELD_NUMBER = 3;
private com.google.protobuf.ByteString targetPath_;
/**
* optional bytes targetPath = 3;
*/
public boolean hasTargetPath() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional bytes targetPath = 3;
*/
public com.google.protobuf.ByteString getTargetPath() {
return targetPath_;
}
private void initFields() {
fullpath_ = com.google.protobuf.ByteString.EMPTY;
modificationLabel_ = "";
targetPath_ = com.google.protobuf.ByteString.EMPTY;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasFullpath()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasModificationLabel()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, fullpath_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getModificationLabelBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, targetPath_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, fullpath_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getModificationLabelBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, targetPath_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto) obj;
boolean result = true;
result = result && (hasFullpath() == other.hasFullpath());
if (hasFullpath()) {
result = result && getFullpath()
.equals(other.getFullpath());
}
result = result && (hasModificationLabel() == other.hasModificationLabel());
if (hasModificationLabel()) {
result = result && getModificationLabel()
.equals(other.getModificationLabel());
}
result = result && (hasTargetPath() == other.hasTargetPath());
if (hasTargetPath()) {
result = result && getTargetPath()
.equals(other.getTargetPath());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasFullpath()) {
hash = (37 * hash) + FULLPATH_FIELD_NUMBER;
hash = (53 * hash) + getFullpath().hashCode();
}
if (hasModificationLabel()) {
hash = (37 * hash) + MODIFICATIONLABEL_FIELD_NUMBER;
hash = (53 * hash) + getModificationLabel().hashCode();
}
if (hasTargetPath()) {
hash = (37 * hash) + TARGETPATH_FIELD_NUMBER;
hash = (53 * hash) + getTargetPath().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.SnapshotDiffReportEntryProto}
*
*
**
* Snapshot diff report entry
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
fullpath_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000001);
modificationLabel_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
targetPath_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.fullpath_ = fullpath_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.modificationLabel_ = modificationLabel_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.targetPath_ = targetPath_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.getDefaultInstance()) return this;
if (other.hasFullpath()) {
setFullpath(other.getFullpath());
}
if (other.hasModificationLabel()) {
bitField0_ |= 0x00000002;
modificationLabel_ = other.modificationLabel_;
onChanged();
}
if (other.hasTargetPath()) {
setTargetPath(other.getTargetPath());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasFullpath()) {
return false;
}
if (!hasModificationLabel()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required bytes fullpath = 1;
private com.google.protobuf.ByteString fullpath_ = com.google.protobuf.ByteString.EMPTY;
/**
* required bytes fullpath = 1;
*/
public boolean hasFullpath() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required bytes fullpath = 1;
*/
public com.google.protobuf.ByteString getFullpath() {
return fullpath_;
}
/**
* required bytes fullpath = 1;
*/
public Builder setFullpath(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
fullpath_ = value;
onChanged();
return this;
}
/**
* required bytes fullpath = 1;
*/
public Builder clearFullpath() {
bitField0_ = (bitField0_ & ~0x00000001);
fullpath_ = getDefaultInstance().getFullpath();
onChanged();
return this;
}
// required string modificationLabel = 2;
private java.lang.Object modificationLabel_ = "";
/**
* required string modificationLabel = 2;
*/
public boolean hasModificationLabel() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string modificationLabel = 2;
*/
public java.lang.String getModificationLabel() {
java.lang.Object ref = modificationLabel_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
modificationLabel_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string modificationLabel = 2;
*/
public com.google.protobuf.ByteString
getModificationLabelBytes() {
java.lang.Object ref = modificationLabel_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
modificationLabel_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string modificationLabel = 2;
*/
public Builder setModificationLabel(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
modificationLabel_ = value;
onChanged();
return this;
}
/**
* required string modificationLabel = 2;
*/
public Builder clearModificationLabel() {
bitField0_ = (bitField0_ & ~0x00000002);
modificationLabel_ = getDefaultInstance().getModificationLabel();
onChanged();
return this;
}
/**
* required string modificationLabel = 2;
*/
public Builder setModificationLabelBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
modificationLabel_ = value;
onChanged();
return this;
}
// optional bytes targetPath = 3;
private com.google.protobuf.ByteString targetPath_ = com.google.protobuf.ByteString.EMPTY;
/**
* optional bytes targetPath = 3;
*/
public boolean hasTargetPath() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional bytes targetPath = 3;
*/
public com.google.protobuf.ByteString getTargetPath() {
return targetPath_;
}
/**
* optional bytes targetPath = 3;
*/
public Builder setTargetPath(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
targetPath_ = value;
onChanged();
return this;
}
/**
* optional bytes targetPath = 3;
*/
public Builder clearTargetPath() {
bitField0_ = (bitField0_ & ~0x00000004);
targetPath_ = getDefaultInstance().getTargetPath();
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshotDiffReportEntryProto)
}
static {
defaultInstance = new SnapshotDiffReportEntryProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshotDiffReportEntryProto)
}
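// Round-trip sketch (editor's addition, not protoc output): building a
// SnapshotDiffReportEntryProto, serializing it, and parsing it back with the
// generated static parseFrom. The path and label values are illustrative only.
private static SnapshotDiffReportEntryProto exampleEntryRoundTrip()
    throws com.google.protobuf.InvalidProtocolBufferException {
  SnapshotDiffReportEntryProto entry = SnapshotDiffReportEntryProto.newBuilder()
      .setFullpath(com.google.protobuf.ByteString.copyFromUtf8("/dir/file")) // required field 1
      .setModificationLabel("M")                                             // required field 2
      .build(); // targetPath (field 3) is optional and left unset here
  byte[] wire = entry.toByteArray();                   // writeTo() over a byte array
  return SnapshotDiffReportEntryProto.parseFrom(wire); // delegates to PARSER
}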
public interface SnapshotDiffReportProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required string snapshotRoot = 1;
/**
* required string snapshotRoot = 1;
*
*
* full path of the directory where snapshots were taken
*
*/
boolean hasSnapshotRoot();
/**
* required string snapshotRoot = 1;
*
*
* full path of the directory where snapshots were taken
*
*/
java.lang.String getSnapshotRoot();
/**
* required string snapshotRoot = 1;
*
*
* full path of the directory where snapshots were taken
*
*/
com.google.protobuf.ByteString
getSnapshotRootBytes();
// required string fromSnapshot = 2;
/**
* required string fromSnapshot = 2;
*/
boolean hasFromSnapshot();
/**
* required string fromSnapshot = 2;
*/
java.lang.String getFromSnapshot();
/**
* required string fromSnapshot = 2;
*/
com.google.protobuf.ByteString
getFromSnapshotBytes();
// required string toSnapshot = 3;
/**
* required string toSnapshot = 3;
*/
boolean hasToSnapshot();
/**
* required string toSnapshot = 3;
*/
java.lang.String getToSnapshot();
/**
* required string toSnapshot = 3;
*/
com.google.protobuf.ByteString
getToSnapshotBytes();
// repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
/**
* repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto>
getDiffReportEntriesList();
/**
* repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto getDiffReportEntries(int index);
/**
* repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
*/
int getDiffReportEntriesCount();
/**
* repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder>
getDiffReportEntriesOrBuilderList();
/**
* repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder getDiffReportEntriesOrBuilder(
int index);
}
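// Read-side sketch (editor's addition, not protoc output): the OrBuilder
// interface is implemented by both the message and its builder, so a helper
// like this hypothetical one can inspect either. It counts entries that carry
// the optional targetPath.
private static int exampleCountEntriesWithTargetPath(
    SnapshotDiffReportProtoOrBuilder report) {
  int count = 0;
  for (int i = 0; i < report.getDiffReportEntriesCount(); i++) {
    if (report.getDiffReportEntries(i).hasTargetPath()) {
      count++;
    }
  }
  return count;
}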
/**
* Protobuf type {@code hadoop.hdfs.SnapshotDiffReportProto}
*
*
**
* Snapshot diff report
*
*/
public static final class SnapshotDiffReportProto extends
com.google.protobuf.GeneratedMessage
implements SnapshotDiffReportProtoOrBuilder {
// Use SnapshotDiffReportProto.newBuilder() to construct.
private SnapshotDiffReportProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private SnapshotDiffReportProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final SnapshotDiffReportProto defaultInstance;
public static SnapshotDiffReportProto getDefaultInstance() {
return defaultInstance;
}
public SnapshotDiffReportProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private SnapshotDiffReportProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
snapshotRoot_ = input.readBytes();
break;
}
case 18: {
bitField0_ |= 0x00000002;
fromSnapshot_ = input.readBytes();
break;
}
case 26: {
bitField0_ |= 0x00000004;
toSnapshot_ = input.readBytes();
break;
}
case 34: {
if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
diffReportEntries_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto>();
mutable_bitField0_ |= 0x00000008;
}
diffReportEntries_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.PARSER, extensionRegistry));
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
diffReportEntries_ = java.util.Collections.unmodifiableList(diffReportEntries_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.Builder.class);
}
public static com.google.protobuf.Parser<SnapshotDiffReportProto> PARSER =
new com.google.protobuf.AbstractParser<SnapshotDiffReportProto>() {
public SnapshotDiffReportProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new SnapshotDiffReportProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<SnapshotDiffReportProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required string snapshotRoot = 1;
public static final int SNAPSHOTROOT_FIELD_NUMBER = 1;
private java.lang.Object snapshotRoot_;
/**
* required string snapshotRoot = 1;
*
*
* full path of the directory where snapshots were taken
*
*/
public boolean hasSnapshotRoot() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string snapshotRoot = 1;
*
*
* full path of the directory where snapshots were taken
*
*/
public java.lang.String getSnapshotRoot() {
java.lang.Object ref = snapshotRoot_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
snapshotRoot_ = s;
}
return s;
}
}
/**
* required string snapshotRoot = 1;
*
*
* full path of the directory where snapshots were taken
*
*/
public com.google.protobuf.ByteString
getSnapshotRootBytes() {
java.lang.Object ref = snapshotRoot_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
snapshotRoot_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required string fromSnapshot = 2;
public static final int FROMSNAPSHOT_FIELD_NUMBER = 2;
private java.lang.Object fromSnapshot_;
/**
* required string fromSnapshot = 2;
*/
public boolean hasFromSnapshot() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string fromSnapshot = 2;
*/
public java.lang.String getFromSnapshot() {
java.lang.Object ref = fromSnapshot_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
fromSnapshot_ = s;
}
return s;
}
}
/**
* required string fromSnapshot = 2;
*/
public com.google.protobuf.ByteString
getFromSnapshotBytes() {
java.lang.Object ref = fromSnapshot_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
fromSnapshot_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required string toSnapshot = 3;
public static final int TOSNAPSHOT_FIELD_NUMBER = 3;
private java.lang.Object toSnapshot_;
/**
* required string toSnapshot = 3;
*/
public boolean hasToSnapshot() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required string toSnapshot = 3;
*/
public java.lang.String getToSnapshot() {
java.lang.Object ref = toSnapshot_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
toSnapshot_ = s;
}
return s;
}
}
/**
* required string toSnapshot = 3;
*/
public com.google.protobuf.ByteString
getToSnapshotBytes() {
java.lang.Object ref = toSnapshot_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
toSnapshot_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
public static final int DIFFREPORTENTRIES_FIELD_NUMBER = 4;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto> diffReportEntries_;
/**
* repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto> getDiffReportEntriesList() {
return diffReportEntries_;
}
/**
* repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder>
getDiffReportEntriesOrBuilderList() {
return diffReportEntries_;
}
/**
* repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
*/
public int getDiffReportEntriesCount() {
return diffReportEntries_.size();
}
/**
* repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto getDiffReportEntries(int index) {
return diffReportEntries_.get(index);
}
/**
* repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder getDiffReportEntriesOrBuilder(
int index) {
return diffReportEntries_.get(index);
}
private void initFields() {
snapshotRoot_ = "";
fromSnapshot_ = "";
toSnapshot_ = "";
diffReportEntries_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasSnapshotRoot()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasFromSnapshot()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasToSnapshot()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getDiffReportEntriesCount(); i++) {
if (!getDiffReportEntries(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getSnapshotRootBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getFromSnapshotBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, getToSnapshotBytes());
}
for (int i = 0; i < diffReportEntries_.size(); i++) {
output.writeMessage(4, diffReportEntries_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getSnapshotRootBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getFromSnapshotBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, getToSnapshotBytes());
}
for (int i = 0; i < diffReportEntries_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(4, diffReportEntries_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto) obj;
boolean result = true;
result = result && (hasSnapshotRoot() == other.hasSnapshotRoot());
if (hasSnapshotRoot()) {
result = result && getSnapshotRoot()
.equals(other.getSnapshotRoot());
}
result = result && (hasFromSnapshot() == other.hasFromSnapshot());
if (hasFromSnapshot()) {
result = result && getFromSnapshot()
.equals(other.getFromSnapshot());
}
result = result && (hasToSnapshot() == other.hasToSnapshot());
if (hasToSnapshot()) {
result = result && getToSnapshot()
.equals(other.getToSnapshot());
}
result = result && getDiffReportEntriesList()
.equals(other.getDiffReportEntriesList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasSnapshotRoot()) {
hash = (37 * hash) + SNAPSHOTROOT_FIELD_NUMBER;
hash = (53 * hash) + getSnapshotRoot().hashCode();
}
if (hasFromSnapshot()) {
hash = (37 * hash) + FROMSNAPSHOT_FIELD_NUMBER;
hash = (53 * hash) + getFromSnapshot().hashCode();
}
if (hasToSnapshot()) {
hash = (37 * hash) + TOSNAPSHOT_FIELD_NUMBER;
hash = (53 * hash) + getToSnapshot().hashCode();
}
if (getDiffReportEntriesCount() > 0) {
hash = (37 * hash) + DIFFREPORTENTRIES_FIELD_NUMBER;
hash = (53 * hash) + getDiffReportEntriesList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.SnapshotDiffReportProto}
*
*
**
* Snapshot diff report
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getDiffReportEntriesFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
snapshotRoot_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
fromSnapshot_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
toSnapshot_ = "";
bitField0_ = (bitField0_ & ~0x00000004);
if (diffReportEntriesBuilder_ == null) {
diffReportEntries_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000008);
} else {
diffReportEntriesBuilder_.clear();
}
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.snapshotRoot_ = snapshotRoot_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.fromSnapshot_ = fromSnapshot_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.toSnapshot_ = toSnapshot_;
if (diffReportEntriesBuilder_ == null) {
if (((bitField0_ & 0x00000008) == 0x00000008)) {
diffReportEntries_ = java.util.Collections.unmodifiableList(diffReportEntries_);
bitField0_ = (bitField0_ & ~0x00000008);
}
result.diffReportEntries_ = diffReportEntries_;
} else {
result.diffReportEntries_ = diffReportEntriesBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.getDefaultInstance()) return this;
if (other.hasSnapshotRoot()) {
bitField0_ |= 0x00000001;
snapshotRoot_ = other.snapshotRoot_;
onChanged();
}
if (other.hasFromSnapshot()) {
bitField0_ |= 0x00000002;
fromSnapshot_ = other.fromSnapshot_;
onChanged();
}
if (other.hasToSnapshot()) {
bitField0_ |= 0x00000004;
toSnapshot_ = other.toSnapshot_;
onChanged();
}
if (diffReportEntriesBuilder_ == null) {
if (!other.diffReportEntries_.isEmpty()) {
if (diffReportEntries_.isEmpty()) {
diffReportEntries_ = other.diffReportEntries_;
bitField0_ = (bitField0_ & ~0x00000008);
} else {
ensureDiffReportEntriesIsMutable();
diffReportEntries_.addAll(other.diffReportEntries_);
}
onChanged();
}
} else {
if (!other.diffReportEntries_.isEmpty()) {
if (diffReportEntriesBuilder_.isEmpty()) {
diffReportEntriesBuilder_.dispose();
diffReportEntriesBuilder_ = null;
diffReportEntries_ = other.diffReportEntries_;
bitField0_ = (bitField0_ & ~0x00000008);
diffReportEntriesBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getDiffReportEntriesFieldBuilder() : null;
} else {
diffReportEntriesBuilder_.addAllMessages(other.diffReportEntries_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasSnapshotRoot()) {
return false;
}
if (!hasFromSnapshot()) {
return false;
}
if (!hasToSnapshot()) {
return false;
}
for (int i = 0; i < getDiffReportEntriesCount(); i++) {
if (!getDiffReportEntries(i).isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string snapshotRoot = 1;
private java.lang.Object snapshotRoot_ = "";
/**
* required string snapshotRoot = 1;
*
*
* full path of the directory where snapshots were taken
*
*/
public boolean hasSnapshotRoot() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string snapshotRoot = 1;
*
*
* full path of the directory where snapshots were taken
*
*/
public java.lang.String getSnapshotRoot() {
java.lang.Object ref = snapshotRoot_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
snapshotRoot_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string snapshotRoot = 1;
*
*
* full path of the directory where snapshots were taken
*
*/
public com.google.protobuf.ByteString
getSnapshotRootBytes() {
java.lang.Object ref = snapshotRoot_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
snapshotRoot_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string snapshotRoot = 1;
*
*
* full path of the directory where snapshots were taken
*
*/
public Builder setSnapshotRoot(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
snapshotRoot_ = value;
onChanged();
return this;
}
/**
* required string snapshotRoot = 1;
*
*
* full path of the directory where snapshots were taken
*
*/
public Builder clearSnapshotRoot() {
bitField0_ = (bitField0_ & ~0x00000001);
snapshotRoot_ = getDefaultInstance().getSnapshotRoot();
onChanged();
return this;
}
/**
* required string snapshotRoot = 1;
*
*
* full path of the directory where snapshots were taken
*
*/
public Builder setSnapshotRootBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
snapshotRoot_ = value;
onChanged();
return this;
}
// required string fromSnapshot = 2;
private java.lang.Object fromSnapshot_ = "";
/**
* required string fromSnapshot = 2;
*/
public boolean hasFromSnapshot() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string fromSnapshot = 2;
*/
public java.lang.String getFromSnapshot() {
java.lang.Object ref = fromSnapshot_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
fromSnapshot_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string fromSnapshot = 2;
*/
public com.google.protobuf.ByteString
getFromSnapshotBytes() {
java.lang.Object ref = fromSnapshot_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
fromSnapshot_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string fromSnapshot = 2;
*/
public Builder setFromSnapshot(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
fromSnapshot_ = value;
onChanged();
return this;
}
/**
* required string fromSnapshot = 2;
*/
public Builder clearFromSnapshot() {
bitField0_ = (bitField0_ & ~0x00000002);
fromSnapshot_ = getDefaultInstance().getFromSnapshot();
onChanged();
return this;
}
/**
* required string fromSnapshot = 2;
*/
public Builder setFromSnapshotBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
fromSnapshot_ = value;
onChanged();
return this;
}
// required string toSnapshot = 3;
private java.lang.Object toSnapshot_ = "";
/**
* required string toSnapshot = 3;
*/
public boolean hasToSnapshot() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required string toSnapshot = 3;
*/
public java.lang.String getToSnapshot() {
java.lang.Object ref = toSnapshot_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
toSnapshot_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string toSnapshot = 3;
*/
public com.google.protobuf.ByteString
getToSnapshotBytes() {
java.lang.Object ref = toSnapshot_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
toSnapshot_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string toSnapshot = 3;
*/
public Builder setToSnapshot(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
toSnapshot_ = value;
onChanged();
return this;
}
/**
* required string toSnapshot = 3;
*/
public Builder clearToSnapshot() {
bitField0_ = (bitField0_ & ~0x00000004);
toSnapshot_ = getDefaultInstance().getToSnapshot();
onChanged();
return this;
}
/**
* required string toSnapshot = 3;
*/
public Builder setToSnapshotBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
toSnapshot_ = value;
onChanged();
return this;
}
// repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto> diffReportEntries_ =
java.util.Collections.emptyList();
private void ensureDiffReportEntriesIsMutable() {
if (!((bitField0_ & 0x00000008) == 0x00000008)) {
diffReportEntries_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto>(diffReportEntries_);
bitField0_ |= 0x00000008;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder> diffReportEntriesBuilder_;
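// The repeated field keeps two interchangeable representations: the plain
// list above while no nested builders are in play, and this
// RepeatedFieldBuilder once getDiffReportEntriesFieldBuilder() is first
// invoked. Every accessor below branches on whichever one is active.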
/**
* repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto> getDiffReportEntriesList() {
if (diffReportEntriesBuilder_ == null) {
return java.util.Collections.unmodifiableList(diffReportEntries_);
} else {
return diffReportEntriesBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
*/
public int getDiffReportEntriesCount() {
if (diffReportEntriesBuilder_ == null) {
return diffReportEntries_.size();
} else {
return diffReportEntriesBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto getDiffReportEntries(int index) {
if (diffReportEntriesBuilder_ == null) {
return diffReportEntries_.get(index);
} else {
return diffReportEntriesBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
*/
public Builder setDiffReportEntries(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto value) {
if (diffReportEntriesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureDiffReportEntriesIsMutable();
diffReportEntries_.set(index, value);
onChanged();
} else {
diffReportEntriesBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
*/
public Builder setDiffReportEntries(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder builderForValue) {
if (diffReportEntriesBuilder_ == null) {
ensureDiffReportEntriesIsMutable();
diffReportEntries_.set(index, builderForValue.build());
onChanged();
} else {
diffReportEntriesBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
*/
public Builder addDiffReportEntries(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto value) {
if (diffReportEntriesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureDiffReportEntriesIsMutable();
diffReportEntries_.add(value);
onChanged();
} else {
diffReportEntriesBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
*/
public Builder addDiffReportEntries(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto value) {
if (diffReportEntriesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureDiffReportEntriesIsMutable();
diffReportEntries_.add(index, value);
onChanged();
} else {
diffReportEntriesBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
*/
public Builder addDiffReportEntries(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder builderForValue) {
if (diffReportEntriesBuilder_ == null) {
ensureDiffReportEntriesIsMutable();
diffReportEntries_.add(builderForValue.build());
onChanged();
} else {
diffReportEntriesBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
*/
public Builder addDiffReportEntries(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder builderForValue) {
if (diffReportEntriesBuilder_ == null) {
ensureDiffReportEntriesIsMutable();
diffReportEntries_.add(index, builderForValue.build());
onChanged();
} else {
diffReportEntriesBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
*/
public Builder addAllDiffReportEntries(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto> values) {
if (diffReportEntriesBuilder_ == null) {
ensureDiffReportEntriesIsMutable();
super.addAll(values, diffReportEntries_);
onChanged();
} else {
diffReportEntriesBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
*/
public Builder clearDiffReportEntries() {
if (diffReportEntriesBuilder_ == null) {
diffReportEntries_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000008);
onChanged();
} else {
diffReportEntriesBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
*/
public Builder removeDiffReportEntries(int index) {
if (diffReportEntriesBuilder_ == null) {
ensureDiffReportEntriesIsMutable();
diffReportEntries_.remove(index);
onChanged();
} else {
diffReportEntriesBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder getDiffReportEntriesBuilder(
int index) {
return getDiffReportEntriesFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder getDiffReportEntriesOrBuilder(
int index) {
if (diffReportEntriesBuilder_ == null) {
return diffReportEntries_.get(index); } else {
return diffReportEntriesBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder>
getDiffReportEntriesOrBuilderList() {
if (diffReportEntriesBuilder_ != null) {
return diffReportEntriesBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(diffReportEntries_);
}
}
/**
* repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder addDiffReportEntriesBuilder() {
return getDiffReportEntriesFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder addDiffReportEntriesBuilder(
int index) {
return getDiffReportEntriesFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder>
getDiffReportEntriesBuilderList() {
return getDiffReportEntriesFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder>
getDiffReportEntriesFieldBuilder() {
if (diffReportEntriesBuilder_ == null) {
diffReportEntriesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder>(
diffReportEntries_,
((bitField0_ & 0x00000008) == 0x00000008),
getParentForChildren(),
isClean());
diffReportEntries_ = null;
}
return diffReportEntriesBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshotDiffReportProto)
}
static {
defaultInstance = new SnapshotDiffReportProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshotDiffReportProto)
}
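// Illustrative sketch, not generated by protoc: assembling a diff report
// through the Builder above. The paths and snapshot names are hypothetical,
// and the entry setters (setFullpath/setModificationLabel) assume the
// SnapshotDiffReportEntryProto layout declared in hdfs.proto.
private static SnapshotDiffReportProto exampleSnapshotDiffReport() {
  return SnapshotDiffReportProto.newBuilder()
      .setSnapshotRoot("/user/data")  // required: the snapshottable directory
      .setFromSnapshot("s1")          // required: older snapshot name
      .setToSnapshot("s2")            // required: newer snapshot name
      .addDiffReportEntries(
          SnapshotDiffReportEntryProto.newBuilder()
              .setFullpath(com.google.protobuf.ByteString.copyFromUtf8("f.txt"))
              .setModificationLabel("M"))  // "M" marks a modified path
      .build();  // throws UninitializedMessageException if a required field is unset
}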
public interface StorageInfoProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required uint32 layoutVersion = 1;
/**
* required uint32 layoutVersion = 1;
*
*
* Layout version of the file system
*
*/
boolean hasLayoutVersion();
/**
* required uint32 layoutVersion = 1;
*
*
* Layout version of the file system
*
*/
int getLayoutVersion();
// required uint32 namespceID = 2;
/**
* required uint32 namespceID = 2;
*
*
* File system namespace ID
*
*/
boolean hasNamespceID();
/**
* required uint32 namespceID = 2;
*
*
* File system namespace ID
*
*/
int getNamespceID();
// required string clusterID = 3;
/**
* required string clusterID = 3;
*
*
* ID of the cluster
*
*/
boolean hasClusterID();
/**
* required string clusterID = 3;
*
*
* ID of the cluster
*
*/
java.lang.String getClusterID();
/**
* required string clusterID = 3;
*
*
* ID of the cluster
*
*/
com.google.protobuf.ByteString
getClusterIDBytes();
// required uint64 cTime = 4;
/**
* required uint64 cTime = 4;
*
*
* File system creation time
*
*/
boolean hasCTime();
/**
* required uint64 cTime = 4;
*
*
* File system creation time
*
*/
long getCTime();
}
/**
* Protobuf type {@code hadoop.hdfs.StorageInfoProto}
*
* <pre>
**
* Common node information shared by all the nodes in the cluster
* </pre>
*/
public static final class StorageInfoProto extends
com.google.protobuf.GeneratedMessage
implements StorageInfoProtoOrBuilder {
// Use StorageInfoProto.newBuilder() to construct.
private StorageInfoProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private StorageInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final StorageInfoProto defaultInstance;
public static StorageInfoProto getDefaultInstance() {
return defaultInstance;
}
public StorageInfoProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private StorageInfoProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
layoutVersion_ = input.readUInt32();
break;
}
case 16: {
bitField0_ |= 0x00000002;
namespceID_ = input.readUInt32();
break;
}
case 26: {
bitField0_ |= 0x00000004;
clusterID_ = input.readBytes();
break;
}
case 32: {
bitField0_ |= 0x00000008;
cTime_ = input.readUInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder.class);
}
public static com.google.protobuf.Parser<StorageInfoProto> PARSER =
new com.google.protobuf.AbstractParser<StorageInfoProto>() {
public StorageInfoProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new StorageInfoProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<StorageInfoProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required uint32 layoutVersion = 1;
public static final int LAYOUTVERSION_FIELD_NUMBER = 1;
private int layoutVersion_;
/**
* required uint32 layoutVersion = 1;
*
*
* Layout version of the file system
*
*/
public boolean hasLayoutVersion() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint32 layoutVersion = 1;
*
*
* Layout version of the file system
*
*/
public int getLayoutVersion() {
return layoutVersion_;
}
// required uint32 namespceID = 2;
public static final int NAMESPCEID_FIELD_NUMBER = 2;
private int namespceID_;
/**
* required uint32 namespceID = 2;
*
*
* File system namespace ID
*
*/
public boolean hasNamespceID() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint32 namespceID = 2;
*
*
* File system namespace ID
*
*/
public int getNamespceID() {
return namespceID_;
}
// required string clusterID = 3;
public static final int CLUSTERID_FIELD_NUMBER = 3;
private java.lang.Object clusterID_;
/**
* required string clusterID = 3;
*
*
* ID of the cluster
*
*/
public boolean hasClusterID() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required string clusterID = 3;
*
*
* ID of the cluster
*
*/
public java.lang.String getClusterID() {
java.lang.Object ref = clusterID_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
clusterID_ = s;
}
return s;
}
}
/**
* required string clusterID = 3;
*
*
* ID of the cluster
*
*/
public com.google.protobuf.ByteString
getClusterIDBytes() {
java.lang.Object ref = clusterID_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
clusterID_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required uint64 cTime = 4;
public static final int CTIME_FIELD_NUMBER = 4;
private long cTime_;
/**
* required uint64 cTime = 4;
*
*
* File system creation time
*
*/
public boolean hasCTime() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required uint64 cTime = 4;
*
*
* File system creation time
*
*/
public long getCTime() {
return cTime_;
}
private void initFields() {
layoutVersion_ = 0;
namespceID_ = 0;
clusterID_ = "";
cTime_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasLayoutVersion()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasNamespceID()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasClusterID()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasCTime()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt32(1, layoutVersion_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt32(2, namespceID_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, getClusterIDBytes());
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt64(4, cTime_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(1, layoutVersion_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(2, namespceID_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, getClusterIDBytes());
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(4, cTime_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto) obj;
boolean result = true;
result = result && (hasLayoutVersion() == other.hasLayoutVersion());
if (hasLayoutVersion()) {
result = result && (getLayoutVersion()
== other.getLayoutVersion());
}
result = result && (hasNamespceID() == other.hasNamespceID());
if (hasNamespceID()) {
result = result && (getNamespceID()
== other.getNamespceID());
}
result = result && (hasClusterID() == other.hasClusterID());
if (hasClusterID()) {
result = result && getClusterID()
.equals(other.getClusterID());
}
result = result && (hasCTime() == other.hasCTime());
if (hasCTime()) {
result = result && (getCTime()
== other.getCTime());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasLayoutVersion()) {
hash = (37 * hash) + LAYOUTVERSION_FIELD_NUMBER;
hash = (53 * hash) + getLayoutVersion();
}
if (hasNamespceID()) {
hash = (37 * hash) + NAMESPCEID_FIELD_NUMBER;
hash = (53 * hash) + getNamespceID();
}
if (hasClusterID()) {
hash = (37 * hash) + CLUSTERID_FIELD_NUMBER;
hash = (53 * hash) + getClusterID().hashCode();
}
if (hasCTime()) {
hash = (37 * hash) + CTIME_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getCTime());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.StorageInfoProto}
*
* <pre>
**
* Common node information shared by all the nodes in the cluster
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
layoutVersion_ = 0;
bitField0_ = (bitField0_ & ~0x00000001);
namespceID_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
clusterID_ = "";
bitField0_ = (bitField0_ & ~0x00000004);
cTime_ = 0L;
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageInfoProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
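// buildPartial() copies the builder state into a new message without the
// required-field check that build() performs; the bit mask is recomputed so
// the message's has*() accessors reflect exactly the fields set here.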
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.layoutVersion_ = layoutVersion_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.namespceID_ = namespceID_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.clusterID_ = clusterID_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.cTime_ = cTime_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance()) return this;
if (other.hasLayoutVersion()) {
setLayoutVersion(other.getLayoutVersion());
}
if (other.hasNamespceID()) {
setNamespceID(other.getNamespceID());
}
if (other.hasClusterID()) {
bitField0_ |= 0x00000004;
clusterID_ = other.clusterID_;
onChanged();
}
if (other.hasCTime()) {
setCTime(other.getCTime());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasLayoutVersion()) {
return false;
}
if (!hasNamespceID()) {
return false;
}
if (!hasClusterID()) {
return false;
}
if (!hasCTime()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required uint32 layoutVersion = 1;
private int layoutVersion_ ;
/**
* required uint32 layoutVersion = 1;
*
*
* Layout version of the file system
*
*/
public boolean hasLayoutVersion() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint32 layoutVersion = 1;
*
*
* Layout version of the file system
*
*/
public int getLayoutVersion() {
return layoutVersion_;
}
/**
* required uint32 layoutVersion = 1;
*
*
* Layout version of the file system
*
*/
public Builder setLayoutVersion(int value) {
bitField0_ |= 0x00000001;
layoutVersion_ = value;
onChanged();
return this;
}
/**
* required uint32 layoutVersion = 1;
*
*
* Layout version of the file system
*
*/
public Builder clearLayoutVersion() {
bitField0_ = (bitField0_ & ~0x00000001);
layoutVersion_ = 0;
onChanged();
return this;
}
// required uint32 namespceID = 2;
private int namespceID_ ;
/**
* required uint32 namespceID = 2;
*
*
* File system namespace ID
*
*/
public boolean hasNamespceID() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint32 namespceID = 2;
*
*
* File system namespace ID
*
*/
public int getNamespceID() {
return namespceID_;
}
/**
* required uint32 namespceID = 2;
*
*
* File system namespace ID
*
*/
public Builder setNamespceID(int value) {
bitField0_ |= 0x00000002;
namespceID_ = value;
onChanged();
return this;
}
/**
* required uint32 namespceID = 2;
*
*
* File system namespace ID
*
*/
public Builder clearNamespceID() {
bitField0_ = (bitField0_ & ~0x00000002);
namespceID_ = 0;
onChanged();
return this;
}
// required string clusterID = 3;
private java.lang.Object clusterID_ = "";
/**
* required string clusterID = 3;
*
*
* ID of the cluster
*
*/
public boolean hasClusterID() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required string clusterID = 3;
*
*
* ID of the cluster
*
*/
public java.lang.String getClusterID() {
java.lang.Object ref = clusterID_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
clusterID_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string clusterID = 3;
*
*
* ID of the cluster
*
*/
public com.google.protobuf.ByteString
getClusterIDBytes() {
java.lang.Object ref = clusterID_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
clusterID_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string clusterID = 3;
*
*
* ID of the cluster
*
*/
public Builder setClusterID(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
clusterID_ = value;
onChanged();
return this;
}
/**
* required string clusterID = 3;
*
*
* ID of the cluster
*
*/
public Builder clearClusterID() {
bitField0_ = (bitField0_ & ~0x00000004);
clusterID_ = getDefaultInstance().getClusterID();
onChanged();
return this;
}
/**
* required string clusterID = 3;
*
*
* ID of the cluster
*
*/
public Builder setClusterIDBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
clusterID_ = value;
onChanged();
return this;
}
// required uint64 cTime = 4;
private long cTime_ ;
/**
* required uint64 cTime = 4;
*
*
* File system creation time
*
*/
public boolean hasCTime() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required uint64 cTime = 4;
*
*
* File system creation time
*
*/
public long getCTime() {
return cTime_;
}
/**
* required uint64 cTime = 4;
*
*
* File system creation time
*
*/
public Builder setCTime(long value) {
bitField0_ |= 0x00000008;
cTime_ = value;
onChanged();
return this;
}
/**
* required uint64 cTime = 4;
*
*
* File system creation time
*
*/
public Builder clearCTime() {
bitField0_ = (bitField0_ & ~0x00000008);
cTime_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageInfoProto)
}
static {
defaultInstance = new StorageInfoProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageInfoProto)
}
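// Illustrative sketch, not generated by protoc: a serialize/parse round trip
// for StorageInfoProto. All four fields are required, so build() would throw
// if any were missing; the values below are hypothetical.
private static StorageInfoProto exampleStorageInfoRoundTrip()
    throws com.google.protobuf.InvalidProtocolBufferException {
  StorageInfoProto info = StorageInfoProto.newBuilder()
      .setLayoutVersion(60)        // layout version of the file system
      .setNamespceID(12345)        // "namespceID" spelling comes from hdfs.proto
      .setClusterID("CID-example")
      .setCTime(0L)
      .build();
  byte[] wire = info.toByteArray();          // inherited from AbstractMessageLite
  StorageInfoProto copy = StorageInfoProto.parseFrom(wire);
  assert copy.equals(info);                  // equals() compares field by field
  return copy;
}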
public interface NamenodeRegistrationProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required string rpcAddress = 1;
/**
* required string rpcAddress = 1;
*
*
* host:port of the namenode RPC address
*
*/
boolean hasRpcAddress();
/**
* required string rpcAddress = 1;
*
*
* host:port of the namenode RPC address
*
*/
java.lang.String getRpcAddress();
/**
* required string rpcAddress = 1;
*
*
* host:port of the namenode RPC address
*
*/
com.google.protobuf.ByteString
getRpcAddressBytes();
// required string httpAddress = 2;
/**
* required string httpAddress = 2;
*
*
* host:port of the namenode http server
*
*/
boolean hasHttpAddress();
/**
* required string httpAddress = 2;
*
*
* host:port of the namenode http server
*
*/
java.lang.String getHttpAddress();
/**
* required string httpAddress = 2;
*
*
* host:port of the namenode http server
*
*/
com.google.protobuf.ByteString
getHttpAddressBytes();
// required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
boolean hasStorageInfo();
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo();
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder();
// optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE];
/**
* optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE];
*
*
* Namenode role
*
*/
boolean hasRole();
/**
* optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE];
*
*
* Namenode role
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto getRole();
}
/**
* Protobuf type {@code hadoop.hdfs.NamenodeRegistrationProto}
*
* <pre>
**
* Information sent by a namenode to identify itself to the primary namenode.
* </pre>
*/
public static final class NamenodeRegistrationProto extends
com.google.protobuf.GeneratedMessage
implements NamenodeRegistrationProtoOrBuilder {
// Use NamenodeRegistrationProto.newBuilder() to construct.
private NamenodeRegistrationProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private NamenodeRegistrationProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final NamenodeRegistrationProto defaultInstance;
public static NamenodeRegistrationProto getDefaultInstance() {
return defaultInstance;
}
public NamenodeRegistrationProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
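// Wire-format constructor below: reads tag/value pairs until end of stream
// (tag 0). Field 3 (storageInfo) is merged into any previously parsed value
// through a sub-builder, and an unrecognized enum number on field 4 is kept
// as an unknown varint field rather than dropped.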
private NamenodeRegistrationProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
rpcAddress_ = input.readBytes();
break;
}
case 18: {
bitField0_ |= 0x00000002;
httpAddress_ = input.readBytes();
break;
}
case 26: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) == 0x00000004)) {
subBuilder = storageInfo_.toBuilder();
}
storageInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(storageInfo_);
storageInfo_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
case 32: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(4, rawValue);
} else {
bitField0_ |= 0x00000008;
role_ = value;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_NamenodeRegistrationProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_NamenodeRegistrationProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder.class);
}
public static com.google.protobuf.Parser<NamenodeRegistrationProto> PARSER =
new com.google.protobuf.AbstractParser<NamenodeRegistrationProto>() {
public NamenodeRegistrationProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new NamenodeRegistrationProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<NamenodeRegistrationProto> getParserForType() {
return PARSER;
}
/**
* Protobuf enum {@code hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto}
*/
public enum NamenodeRoleProto
implements com.google.protobuf.ProtocolMessageEnum {
/**
* NAMENODE = 1;
*/
NAMENODE(0, 1),
/**
* BACKUP = 2;
*/
BACKUP(1, 2),
/**
* CHECKPOINT = 3;
*/
CHECKPOINT(2, 3),
;
/**
* NAMENODE = 1;
*/
public static final int NAMENODE_VALUE = 1;
/**
* BACKUP = 2;
*/
public static final int BACKUP_VALUE = 2;
/**
* CHECKPOINT = 3;
*/
public static final int CHECKPOINT_VALUE = 3;
public final int getNumber() { return value; }
public static NamenodeRoleProto valueOf(int value) {
switch (value) {
case 1: return NAMENODE;
case 2: return BACKUP;
case 3: return CHECKPOINT;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<NamenodeRoleProto>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<NamenodeRoleProto>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<NamenodeRoleProto>() {
public NamenodeRoleProto findValueByNumber(int number) {
return NamenodeRoleProto.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDescriptor().getEnumTypes().get(0);
}
private static final NamenodeRoleProto[] VALUES = values();
public static NamenodeRoleProto valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private NamenodeRoleProto(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto)
}
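// Illustrative sketch, not generated by protoc: NamenodeRoleProto maps wire
// numbers 1..3 to NAMENODE/BACKUP/CHECKPOINT. valueOf(int) returns null for
// any other number, which the parsing constructor relies on to divert
// unrecognized values into the unknown-field set.
private static void exampleRoleNumbering() {
  NamenodeRoleProto backup = NamenodeRoleProto.valueOf(2);   // BACKUP
  int wire = NamenodeRoleProto.CHECKPOINT.getNumber();       // 3
  NamenodeRoleProto unknown = NamenodeRoleProto.valueOf(7);  // null: not a known role
  assert backup == NamenodeRoleProto.BACKUP && wire == 3 && unknown == null;
}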
private int bitField0_;
// required string rpcAddress = 1;
public static final int RPCADDRESS_FIELD_NUMBER = 1;
private java.lang.Object rpcAddress_;
/**
* required string rpcAddress = 1;
*
*
* host:port of the namenode RPC address
*
*/
public boolean hasRpcAddress() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string rpcAddress = 1;
*
*
* host:port of the namenode RPC address
*
*/
public java.lang.String getRpcAddress() {
java.lang.Object ref = rpcAddress_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
rpcAddress_ = s;
}
return s;
}
}
/**
* required string rpcAddress = 1;
*
*
* host:port of the namenode RPC address
*
*/
public com.google.protobuf.ByteString
getRpcAddressBytes() {
java.lang.Object ref = rpcAddress_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
rpcAddress_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required string httpAddress = 2;
public static final int HTTPADDRESS_FIELD_NUMBER = 2;
private java.lang.Object httpAddress_;
/**
* required string httpAddress = 2;
*
*
* host:port of the namenode http server
*
*/
public boolean hasHttpAddress() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string httpAddress = 2;
*
*
* host:port of the namenode http server
*
*/
public java.lang.String getHttpAddress() {
java.lang.Object ref = httpAddress_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
httpAddress_ = s;
}
return s;
}
}
/**
* required string httpAddress = 2;
*
*
* host:port of the namenode http server
*
*/
public com.google.protobuf.ByteString
getHttpAddressBytes() {
java.lang.Object ref = httpAddress_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
httpAddress_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
public static final int STORAGEINFO_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storageInfo_;
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
public boolean hasStorageInfo() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo() {
return storageInfo_;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() {
return storageInfo_;
}
// optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE];
public static final int ROLE_FIELD_NUMBER = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto role_;
/**
* optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE];
*
*
* Namenode role
*
*/
public boolean hasRole() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE];
*
*
* Namenode role
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto getRole() {
return role_;
}
private void initFields() {
rpcAddress_ = "";
httpAddress_ = "";
storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
role_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto.NAMENODE;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasRpcAddress()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasHttpAddress()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasStorageInfo()) {
memoizedIsInitialized = 0;
return false;
}
if (!getStorageInfo().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getRpcAddressBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getHttpAddressBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeMessage(3, storageInfo_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeEnum(4, role_.getNumber());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getRpcAddressBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getHttpAddressBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, storageInfo_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(4, role_.getNumber());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto) obj;
boolean result = true;
result = result && (hasRpcAddress() == other.hasRpcAddress());
if (hasRpcAddress()) {
result = result && getRpcAddress()
.equals(other.getRpcAddress());
}
result = result && (hasHttpAddress() == other.hasHttpAddress());
if (hasHttpAddress()) {
result = result && getHttpAddress()
.equals(other.getHttpAddress());
}
result = result && (hasStorageInfo() == other.hasStorageInfo());
if (hasStorageInfo()) {
result = result && getStorageInfo()
.equals(other.getStorageInfo());
}
result = result && (hasRole() == other.hasRole());
if (hasRole()) {
result = result &&
(getRole() == other.getRole());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasRpcAddress()) {
hash = (37 * hash) + RPCADDRESS_FIELD_NUMBER;
hash = (53 * hash) + getRpcAddress().hashCode();
}
if (hasHttpAddress()) {
hash = (37 * hash) + HTTPADDRESS_FIELD_NUMBER;
hash = (53 * hash) + getHttpAddress().hashCode();
}
if (hasStorageInfo()) {
hash = (37 * hash) + STORAGEINFO_FIELD_NUMBER;
hash = (53 * hash) + getStorageInfo().hashCode();
}
if (hasRole()) {
hash = (37 * hash) + ROLE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getRole());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.NamenodeRegistrationProto}
*
* <pre>
**
* Information sent by a namenode to identify itself to the primary namenode.
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_NamenodeRegistrationProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_NamenodeRegistrationProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getStorageInfoFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
rpcAddress_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
httpAddress_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
if (storageInfoBuilder_ == null) {
storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
} else {
storageInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
role_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto.NAMENODE;
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_NamenodeRegistrationProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.rpcAddress_ = rpcAddress_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.httpAddress_ = httpAddress_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
if (storageInfoBuilder_ == null) {
result.storageInfo_ = storageInfo_;
} else {
result.storageInfo_ = storageInfoBuilder_.build();
}
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.role_ = role_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance()) return this;
if (other.hasRpcAddress()) {
bitField0_ |= 0x00000001;
rpcAddress_ = other.rpcAddress_;
onChanged();
}
if (other.hasHttpAddress()) {
bitField0_ |= 0x00000002;
httpAddress_ = other.httpAddress_;
onChanged();
}
if (other.hasStorageInfo()) {
mergeStorageInfo(other.getStorageInfo());
}
if (other.hasRole()) {
setRole(other.getRole());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasRpcAddress()) {
return false;
}
if (!hasHttpAddress()) {
return false;
}
if (!hasStorageInfo()) {
return false;
}
if (!getStorageInfo().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
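// Note: the finally block above merges whatever was decoded before a failure,
// so this builder keeps the valid prefix of a corrupt stream even though the
// InvalidProtocolBufferException still propagates to the caller.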
private int bitField0_;
// required string rpcAddress = 1;
private java.lang.Object rpcAddress_ = "";
/**
* required string rpcAddress = 1;
*
*
* host:port of the namenode RPC address
*
*/
public boolean hasRpcAddress() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string rpcAddress = 1;
*
*
* host:port of the namenode RPC address
*
*/
public java.lang.String getRpcAddress() {
java.lang.Object ref = rpcAddress_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
rpcAddress_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string rpcAddress = 1;
*
*
* host:port of the namenode RPC address
*
*/
public com.google.protobuf.ByteString
getRpcAddressBytes() {
java.lang.Object ref = rpcAddress_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
rpcAddress_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string rpcAddress = 1;
*
*
* host:port of the namenode RPC address
*
*/
public Builder setRpcAddress(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
rpcAddress_ = value;
onChanged();
return this;
}
/**
* required string rpcAddress = 1;
*
*
* host:port of the namenode RPC address
*
*/
public Builder clearRpcAddress() {
bitField0_ = (bitField0_ & ~0x00000001);
rpcAddress_ = getDefaultInstance().getRpcAddress();
onChanged();
return this;
}
/**
* required string rpcAddress = 1;
*
*
* host:port of the namenode RPC address
*
*/
public Builder setRpcAddressBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
rpcAddress_ = value;
onChanged();
return this;
}
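// Note: rpcAddress_ is stored as either a String or a ByteString. The accessors
// above convert lazily and cache the result, so UTF-8 encoding or decoding
// happens at most once per representation. httpAddress below works the same way.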
// required string httpAddress = 2;
private java.lang.Object httpAddress_ = "";
/**
* required string httpAddress = 2;
*
*
* host:port of the namenode http server
*
*/
public boolean hasHttpAddress() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string httpAddress = 2;
*
*
* host:port of the namenode http server
*
*/
public java.lang.String getHttpAddress() {
java.lang.Object ref = httpAddress_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
httpAddress_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string httpAddress = 2;
*
*
* host:port of the namenode http server
*
*/
public com.google.protobuf.ByteString
getHttpAddressBytes() {
java.lang.Object ref = httpAddress_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
httpAddress_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string httpAddress = 2;
*
*
* host:port of the namenode http server
*
*/
public Builder setHttpAddress(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
httpAddress_ = value;
onChanged();
return this;
}
/**
* required string httpAddress = 2;
*
*
* host:port of the namenode http server
*
*/
public Builder clearHttpAddress() {
bitField0_ = (bitField0_ & ~0x00000002);
httpAddress_ = getDefaultInstance().getHttpAddress();
onChanged();
return this;
}
/**
* required string httpAddress = 2;
*
*
* host:port of the namenode http server
*
*/
public Builder setHttpAddressBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
httpAddress_ = value;
onChanged();
return this;
}
// required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder> storageInfoBuilder_;
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
public boolean hasStorageInfo() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo() {
if (storageInfoBuilder_ == null) {
return storageInfo_;
} else {
return storageInfoBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
public Builder setStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) {
if (storageInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
storageInfo_ = value;
onChanged();
} else {
storageInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
public Builder setStorageInfo(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder builderForValue) {
if (storageInfoBuilder_ == null) {
storageInfo_ = builderForValue.build();
onChanged();
} else {
storageInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
return this;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
public Builder mergeStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) {
if (storageInfoBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004) &&
storageInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance()) {
storageInfo_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder(storageInfo_).mergeFrom(value).buildPartial();
} else {
storageInfo_ = value;
}
onChanged();
} else {
storageInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
public Builder clearStorageInfo() {
if (storageInfoBuilder_ == null) {
storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
onChanged();
} else {
storageInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder getStorageInfoBuilder() {
bitField0_ |= 0x00000004;
onChanged();
return getStorageInfoFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() {
if (storageInfoBuilder_ != null) {
return storageInfoBuilder_.getMessageOrBuilder();
} else {
return storageInfo_;
}
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 3;
*
*
* Node information
*
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder>
getStorageInfoFieldBuilder() {
if (storageInfoBuilder_ == null) {
storageInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder>(
storageInfo_,
getParentForChildren(),
isClean());
storageInfo_ = null;
}
return storageInfoBuilder_;
}
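// Note: once getStorageInfoFieldBuilder() runs, the SingleFieldBuilder owns the
// value and storageInfo_ is nulled out; that is why every storageInfo accessor
// above branches on storageInfoBuilder_ == null.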
// optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE];
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto role_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto.NAMENODE;
/**
* optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE];
*
*
* Namenode role
*
*/
public boolean hasRole() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE];
*
*
* Namenode role
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto getRole() {
return role_;
}
/**
* optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE];
*
*
* Namenode role
*
*/
public Builder setRole(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
role_ = value;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.NamenodeRegistrationProto.NamenodeRoleProto role = 4 [default = NAMENODE];
*
*
* Namenode role
*
*/
public Builder clearRole() {
bitField0_ = (bitField0_ & ~0x00000008);
role_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto.NAMENODE;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.NamenodeRegistrationProto)
}
static {
defaultInstance = new NamenodeRegistrationProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.NamenodeRegistrationProto)
}
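// Illustrative sketch, not generated output: the usual builder round trip for
// NamenodeRegistrationProto. The addresses are made-up examples. buildPartial()
// is used because the required storageInfo field is deliberately left unset
// here, and build() would throw for the missing required field.
private static NamenodeRegistrationProto exampleNamenodeRegistrationRoundTrip()
    throws com.google.protobuf.InvalidProtocolBufferException {
  NamenodeRegistrationProto reg = NamenodeRegistrationProto.newBuilder()
      .setRpcAddress("nn1.example.com:8020")                          // required field 1
      .setHttpAddress("nn1.example.com:9870")                         // required field 2
      .setRole(NamenodeRegistrationProto.NamenodeRoleProto.NAMENODE)  // optional field 4
      .buildPartial();
  byte[] wire = reg.toByteArray();               // serialize the fields that are set
  return NamenodeRegistrationProto.newBuilder()
      .mergeFrom(wire)                           // decode without the required-field check
      .buildPartial();
}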
public interface CheckpointSignatureProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required string blockPoolId = 1;
/**
* required string blockPoolId = 1;
*/
boolean hasBlockPoolId();
/**
* required string blockPoolId = 1;
*/
java.lang.String getBlockPoolId();
/**
* required string blockPoolId = 1;
*/
com.google.protobuf.ByteString
getBlockPoolIdBytes();
// required uint64 mostRecentCheckpointTxId = 2;
/**
* required uint64 mostRecentCheckpointTxId = 2;
*/
boolean hasMostRecentCheckpointTxId();
/**
* required uint64 mostRecentCheckpointTxId = 2;
*/
long getMostRecentCheckpointTxId();
// required uint64 curSegmentTxId = 3;
/**
* required uint64 curSegmentTxId = 3;
*/
boolean hasCurSegmentTxId();
/**
* required uint64 curSegmentTxId = 3;
*/
long getCurSegmentTxId();
// required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
boolean hasStorageInfo();
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo();
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.CheckpointSignatureProto}
*
* Unique signature to identify checkpoint transactions.
*/
public static final class CheckpointSignatureProto extends
com.google.protobuf.GeneratedMessage
implements CheckpointSignatureProtoOrBuilder {
// Use CheckpointSignatureProto.newBuilder() to construct.
private CheckpointSignatureProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private CheckpointSignatureProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final CheckpointSignatureProto defaultInstance;
public static CheckpointSignatureProto getDefaultInstance() {
return defaultInstance;
}
public CheckpointSignatureProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private CheckpointSignatureProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
blockPoolId_ = input.readBytes();
break;
}
case 16: {
bitField0_ |= 0x00000002;
mostRecentCheckpointTxId_ = input.readUInt64();
break;
}
case 24: {
bitField0_ |= 0x00000004;
curSegmentTxId_ = input.readUInt64();
break;
}
case 34: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000008) == 0x00000008)) {
subBuilder = storageInfo_.toBuilder();
}
storageInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(storageInfo_);
storageInfo_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000008;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
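// Note: this stream constructor is what PARSER delegates to. Known fields are
// decoded eagerly, unrecognized tags are preserved in unknownFields, and a
// repeated occurrence of tag 34 merges into the existing storageInfo_ via its
// builder instead of replacing it.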
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CheckpointSignatureProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CheckpointSignatureProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder.class);
}
public static com.google.protobuf.Parser<CheckpointSignatureProto> PARSER =
new com.google.protobuf.AbstractParser<CheckpointSignatureProto>() {
public CheckpointSignatureProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new CheckpointSignatureProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<CheckpointSignatureProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required string blockPoolId = 1;
public static final int BLOCKPOOLID_FIELD_NUMBER = 1;
private java.lang.Object blockPoolId_;
/**
* required string blockPoolId = 1;
*/
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string blockPoolId = 1;
*/
public java.lang.String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
blockPoolId_ = s;
}
return s;
}
}
/**
* required string blockPoolId = 1;
*/
public com.google.protobuf.ByteString
getBlockPoolIdBytes() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPoolId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required uint64 mostRecentCheckpointTxId = 2;
public static final int MOSTRECENTCHECKPOINTTXID_FIELD_NUMBER = 2;
private long mostRecentCheckpointTxId_;
/**
* required uint64 mostRecentCheckpointTxId = 2;
*/
public boolean hasMostRecentCheckpointTxId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 mostRecentCheckpointTxId = 2;
*/
public long getMostRecentCheckpointTxId() {
return mostRecentCheckpointTxId_;
}
// required uint64 curSegmentTxId = 3;
public static final int CURSEGMENTTXID_FIELD_NUMBER = 3;
private long curSegmentTxId_;
/**
* required uint64 curSegmentTxId = 3;
*/
public boolean hasCurSegmentTxId() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 curSegmentTxId = 3;
*/
public long getCurSegmentTxId() {
return curSegmentTxId_;
}
// required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
public static final int STORAGEINFO_FIELD_NUMBER = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storageInfo_;
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
public boolean hasStorageInfo() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo() {
return storageInfo_;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() {
return storageInfo_;
}
private void initFields() {
blockPoolId_ = "";
mostRecentCheckpointTxId_ = 0L;
curSegmentTxId_ = 0L;
storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasBlockPoolId()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasMostRecentCheckpointTxId()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasCurSegmentTxId()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasStorageInfo()) {
memoizedIsInitialized = 0;
return false;
}
if (!getStorageInfo().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
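// Note: memoizedIsInitialized caches the required-field check (-1 = not yet
// computed, 0 = failed, 1 = passed); caching is safe because the message is
// immutable once constructed.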
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getBlockPoolIdBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, mostRecentCheckpointTxId_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt64(3, curSegmentTxId_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeMessage(4, storageInfo_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getBlockPoolIdBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, mostRecentCheckpointTxId_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, curSegmentTxId_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(4, storageInfo_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
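// Note: writeTo() calls getSerializedSize() first so that this memoized size,
// including the cached sizes of nested messages, is populated before any bytes
// are written; length-delimited fields such as storageInfo depend on it.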
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto) obj;
boolean result = true;
result = result && (hasBlockPoolId() == other.hasBlockPoolId());
if (hasBlockPoolId()) {
result = result && getBlockPoolId()
.equals(other.getBlockPoolId());
}
result = result && (hasMostRecentCheckpointTxId() == other.hasMostRecentCheckpointTxId());
if (hasMostRecentCheckpointTxId()) {
result = result && (getMostRecentCheckpointTxId()
== other.getMostRecentCheckpointTxId());
}
result = result && (hasCurSegmentTxId() == other.hasCurSegmentTxId());
if (hasCurSegmentTxId()) {
result = result && (getCurSegmentTxId()
== other.getCurSegmentTxId());
}
result = result && (hasStorageInfo() == other.hasStorageInfo());
if (hasStorageInfo()) {
result = result && getStorageInfo()
.equals(other.getStorageInfo());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasBlockPoolId()) {
hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
hash = (53 * hash) + getBlockPoolId().hashCode();
}
if (hasMostRecentCheckpointTxId()) {
hash = (37 * hash) + MOSTRECENTCHECKPOINTTXID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getMostRecentCheckpointTxId());
}
if (hasCurSegmentTxId()) {
hash = (37 * hash) + CURSEGMENTTXID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getCurSegmentTxId());
}
if (hasStorageInfo()) {
hash = (37 * hash) + STORAGEINFO_FIELD_NUMBER;
hash = (53 * hash) + getStorageInfo().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
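// Note: each present field mixes its field number (x37) before its value (x53),
// so equal messages hash equally while identical values in different fields
// still diverge; the result is memoized because the message is immutable.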
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.CheckpointSignatureProto}
*
* Unique signature to identify checkpoint transactions.
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CheckpointSignatureProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CheckpointSignatureProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getStorageInfoFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
blockPoolId_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
mostRecentCheckpointTxId_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
curSegmentTxId_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
if (storageInfoBuilder_ == null) {
storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
} else {
storageInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CheckpointSignatureProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.blockPoolId_ = blockPoolId_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.mostRecentCheckpointTxId_ = mostRecentCheckpointTxId_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.curSegmentTxId_ = curSegmentTxId_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
if (storageInfoBuilder_ == null) {
result.storageInfo_ = storageInfo_;
} else {
result.storageInfo_ = storageInfoBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance()) return this;
if (other.hasBlockPoolId()) {
bitField0_ |= 0x00000001;
blockPoolId_ = other.blockPoolId_;
onChanged();
}
if (other.hasMostRecentCheckpointTxId()) {
setMostRecentCheckpointTxId(other.getMostRecentCheckpointTxId());
}
if (other.hasCurSegmentTxId()) {
setCurSegmentTxId(other.getCurSegmentTxId());
}
if (other.hasStorageInfo()) {
mergeStorageInfo(other.getStorageInfo());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasBlockPoolId()) {
return false;
}
if (!hasMostRecentCheckpointTxId()) {
return false;
}
if (!hasCurSegmentTxId()) {
return false;
}
if (!hasStorageInfo()) {
return false;
}
if (!getStorageInfo().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string blockPoolId = 1;
private java.lang.Object blockPoolId_ = "";
/**
* required string blockPoolId = 1;
*/
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string blockPoolId = 1;
*/
public java.lang.String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
blockPoolId_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string blockPoolId = 1;
*/
public com.google.protobuf.ByteString
getBlockPoolIdBytes() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPoolId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string blockPoolId = 1;
*/
public Builder setBlockPoolId(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
blockPoolId_ = value;
onChanged();
return this;
}
/**
* required string blockPoolId = 1;
*/
public Builder clearBlockPoolId() {
bitField0_ = (bitField0_ & ~0x00000001);
blockPoolId_ = getDefaultInstance().getBlockPoolId();
onChanged();
return this;
}
/**
* required string blockPoolId = 1;
*/
public Builder setBlockPoolIdBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
blockPoolId_ = value;
onChanged();
return this;
}
// required uint64 mostRecentCheckpointTxId = 2;
private long mostRecentCheckpointTxId_ ;
/**
* required uint64 mostRecentCheckpointTxId = 2;
*/
public boolean hasMostRecentCheckpointTxId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 mostRecentCheckpointTxId = 2;
*/
public long getMostRecentCheckpointTxId() {
return mostRecentCheckpointTxId_;
}
/**
* required uint64 mostRecentCheckpointTxId = 2;
*/
public Builder setMostRecentCheckpointTxId(long value) {
bitField0_ |= 0x00000002;
mostRecentCheckpointTxId_ = value;
onChanged();
return this;
}
/**
* required uint64 mostRecentCheckpointTxId = 2;
*/
public Builder clearMostRecentCheckpointTxId() {
bitField0_ = (bitField0_ & ~0x00000002);
mostRecentCheckpointTxId_ = 0L;
onChanged();
return this;
}
// required uint64 curSegmentTxId = 3;
private long curSegmentTxId_ ;
/**
* required uint64 curSegmentTxId = 3;
*/
public boolean hasCurSegmentTxId() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 curSegmentTxId = 3;
*/
public long getCurSegmentTxId() {
return curSegmentTxId_;
}
/**
* required uint64 curSegmentTxId = 3;
*/
public Builder setCurSegmentTxId(long value) {
bitField0_ |= 0x00000004;
curSegmentTxId_ = value;
onChanged();
return this;
}
/**
* required uint64 curSegmentTxId = 3;
*/
public Builder clearCurSegmentTxId() {
bitField0_ = (bitField0_ & ~0x00000004);
curSegmentTxId_ = 0L;
onChanged();
return this;
}
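// Note: the numeric setters above need no null checks or lazy conversion; they
// simply record the value and flip the presence bit, and clear() restores the
// proto2 default of 0L.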
// required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder> storageInfoBuilder_;
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
public boolean hasStorageInfo() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo() {
if (storageInfoBuilder_ == null) {
return storageInfo_;
} else {
return storageInfoBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
public Builder setStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) {
if (storageInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
storageInfo_ = value;
onChanged();
} else {
storageInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
public Builder setStorageInfo(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder builderForValue) {
if (storageInfoBuilder_ == null) {
storageInfo_ = builderForValue.build();
onChanged();
} else {
storageInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000008;
return this;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
public Builder mergeStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) {
if (storageInfoBuilder_ == null) {
if (((bitField0_ & 0x00000008) == 0x00000008) &&
storageInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance()) {
storageInfo_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder(storageInfo_).mergeFrom(value).buildPartial();
} else {
storageInfo_ = value;
}
onChanged();
} else {
storageInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
public Builder clearStorageInfo() {
if (storageInfoBuilder_ == null) {
storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
onChanged();
} else {
storageInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder getStorageInfoBuilder() {
bitField0_ |= 0x00000008;
onChanged();
return getStorageInfoFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() {
if (storageInfoBuilder_ != null) {
return storageInfoBuilder_.getMessageOrBuilder();
} else {
return storageInfo_;
}
}
/**
* required .hadoop.hdfs.StorageInfoProto storageInfo = 4;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder>
getStorageInfoFieldBuilder() {
if (storageInfoBuilder_ == null) {
storageInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder>(
storageInfo_,
getParentForChildren(),
isClean());
storageInfo_ = null;
}
return storageInfoBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.CheckpointSignatureProto)
}
static {
defaultInstance = new CheckpointSignatureProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.CheckpointSignatureProto)
}
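// Illustrative sketch, not generated output: recovering the decoded prefix of
// a truncated or otherwise invalid CheckpointSignatureProto payload, mirroring
// the Builder.mergeFrom(CodedInputStream, ...) pattern above. The returned
// message may be null or may fail isInitialized().
private static CheckpointSignatureProto exampleTolerantParse(byte[] wire) {
  try {
    return CheckpointSignatureProto.PARSER.parsePartialFrom(wire);
  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
    return (CheckpointSignatureProto) e.getUnfinishedMessage(); // decoded prefix, possibly null
  }
}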
public interface NamenodeCommandProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required uint32 action = 1;
/**
* required uint32 action = 1;
*/
boolean hasAction();
/**
* required uint32 action = 1;
*/
int getAction();
// required .hadoop.hdfs.NamenodeCommandProto.Type type = 2;
/**
* required .hadoop.hdfs.NamenodeCommandProto.Type type = 2;
*/
boolean hasType();
/**
* required .hadoop.hdfs.NamenodeCommandProto.Type type = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Type getType();
// optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3;
/**
* optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3;
*/
boolean hasCheckpointCmd();
/**
* optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto getCheckpointCmd();
/**
* optional .hadoop.hdfs.CheckpointCommandProto checkpointCmd = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProtoOrBuilder getCheckpointCmdOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.NamenodeCommandProto}
*
* Command sent from one namenode to another namenode.
*/
public static final class NamenodeCommandProto extends
com.google.protobuf.GeneratedMessage
implements NamenodeCommandProtoOrBuilder {
// Use NamenodeCommandProto.newBuilder() to construct.
private NamenodeCommandProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private NamenodeCommandProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final NamenodeCommandProto defaultInstance;
public static NamenodeCommandProto getDefaultInstance() {
return defaultInstance;
}
public NamenodeCommandProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private NamenodeCommandProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
action_ = input.readUInt32();
break;
}
case 16: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Type value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Type.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(2, rawValue);
} else {
bitField0_ |= 0x00000002;
type_ = value;
}
break;
}
case 26: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) == 0x00000004)) {
subBuilder = checkpointCmd_.toBuilder();
}
checkpointCmd_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(checkpointCmd_);
checkpointCmd_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
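// Note: an enum number this parser does not recognize (case 16) is not dropped;
// mergeVarintField stores it among the unknown fields, so a value from a newer
// Type definition survives a reserialization round trip.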
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_NamenodeCommandProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_NamenodeCommandProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Builder.class);
}
public static com.google.protobuf.Parser<NamenodeCommandProto> PARSER =
new com.google.protobuf.AbstractParser<NamenodeCommandProto>() {
public NamenodeCommandProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new NamenodeCommandProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<NamenodeCommandProto> getParserForType() {
return PARSER;
}
/**
* Protobuf enum {@code hadoop.hdfs.NamenodeCommandProto.Type}
*/
public enum Type
implements com.google.protobuf.ProtocolMessageEnum {
/**
*