// ---------------------------------------------------------------------------
// NOTE(review): the five lines previously here were non-Java artifacts from
// the web page this file was scraped from ("Please wait...", download/pricing
// text, and the "org.apache.hadoop.hdfs.protocol.proto.HdfsProtos
// Maven / Gradle / Ivy" listing header). They are not part of the source and
// would not compile; replaced with this comment.
// ---------------------------------------------------------------------------
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: hdfs.proto
package org.apache.hadoop.hdfs.protocol.proto;
public final class HdfsProtos {
private HdfsProtos() {}
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
}
/**
* Protobuf enum {@code hadoop.hdfs.StorageTypeProto}
*
*
**
* Types of recognized storage media.
*
*/
public enum StorageTypeProto
implements com.google.protobuf.ProtocolMessageEnum {
/**
* DISK = 1;
*/
DISK(0, 1),
/**
* SSD = 2;
*/
SSD(1, 2),
/**
* ARCHIVE = 3;
*/
ARCHIVE(2, 3),
/**
* RAM_DISK = 4;
*/
RAM_DISK(3, 4),
/**
* PROVIDED = 5;
*/
PROVIDED(4, 5),
;
/**
* DISK = 1;
*/
public static final int DISK_VALUE = 1;
/**
* SSD = 2;
*/
public static final int SSD_VALUE = 2;
/**
* ARCHIVE = 3;
*/
public static final int ARCHIVE_VALUE = 3;
/**
* RAM_DISK = 4;
*/
public static final int RAM_DISK_VALUE = 4;
/**
* PROVIDED = 5;
*/
public static final int PROVIDED_VALUE = 5;
public final int getNumber() { return value; }
public static StorageTypeProto valueOf(int value) {
switch (value) {
case 1: return DISK;
case 2: return SSD;
case 3: return ARCHIVE;
case 4: return RAM_DISK;
case 5: return PROVIDED;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap() {
public StorageTypeProto findValueByNumber(int number) {
return StorageTypeProto.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(0);
}
private static final StorageTypeProto[] VALUES = values();
public static StorageTypeProto valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private StorageTypeProto(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.StorageTypeProto)
}
/**
* Protobuf enum {@code hadoop.hdfs.BlockTypeProto}
*
*
**
* Types of recognized blocks.
*
*/
public enum BlockTypeProto
implements com.google.protobuf.ProtocolMessageEnum {
/**
* CONTIGUOUS = 0;
*/
CONTIGUOUS(0, 0),
/**
* STRIPED = 1;
*/
STRIPED(1, 1),
;
/**
* CONTIGUOUS = 0;
*/
public static final int CONTIGUOUS_VALUE = 0;
/**
* STRIPED = 1;
*/
public static final int STRIPED_VALUE = 1;
public final int getNumber() { return value; }
public static BlockTypeProto valueOf(int value) {
switch (value) {
case 0: return CONTIGUOUS;
case 1: return STRIPED;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap() {
public BlockTypeProto findValueByNumber(int number) {
return BlockTypeProto.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(1);
}
private static final BlockTypeProto[] VALUES = values();
public static BlockTypeProto valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private BlockTypeProto(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.BlockTypeProto)
}
/**
* Protobuf enum {@code hadoop.hdfs.CipherSuiteProto}
*
*
**
* Cipher suite.
*
*/
public enum CipherSuiteProto
implements com.google.protobuf.ProtocolMessageEnum {
/**
* UNKNOWN = 1;
*/
UNKNOWN(0, 1),
/**
* AES_CTR_NOPADDING = 2;
*/
AES_CTR_NOPADDING(1, 2),
;
/**
* UNKNOWN = 1;
*/
public static final int UNKNOWN_VALUE = 1;
/**
* AES_CTR_NOPADDING = 2;
*/
public static final int AES_CTR_NOPADDING_VALUE = 2;
public final int getNumber() { return value; }
public static CipherSuiteProto valueOf(int value) {
switch (value) {
case 1: return UNKNOWN;
case 2: return AES_CTR_NOPADDING;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap() {
public CipherSuiteProto findValueByNumber(int number) {
return CipherSuiteProto.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(2);
}
private static final CipherSuiteProto[] VALUES = values();
public static CipherSuiteProto valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private CipherSuiteProto(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.CipherSuiteProto)
}
/**
* Protobuf enum {@code hadoop.hdfs.CryptoProtocolVersionProto}
*
*
**
* Crypto protocol version used to access encrypted files.
*
*/
public enum CryptoProtocolVersionProto
implements com.google.protobuf.ProtocolMessageEnum {
/**
* UNKNOWN_PROTOCOL_VERSION = 1;
*/
UNKNOWN_PROTOCOL_VERSION(0, 1),
/**
* ENCRYPTION_ZONES = 2;
*/
ENCRYPTION_ZONES(1, 2),
;
/**
* UNKNOWN_PROTOCOL_VERSION = 1;
*/
public static final int UNKNOWN_PROTOCOL_VERSION_VALUE = 1;
/**
* ENCRYPTION_ZONES = 2;
*/
public static final int ENCRYPTION_ZONES_VALUE = 2;
public final int getNumber() { return value; }
public static CryptoProtocolVersionProto valueOf(int value) {
switch (value) {
case 1: return UNKNOWN_PROTOCOL_VERSION;
case 2: return ENCRYPTION_ZONES;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap() {
public CryptoProtocolVersionProto findValueByNumber(int number) {
return CryptoProtocolVersionProto.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(3);
}
private static final CryptoProtocolVersionProto[] VALUES = values();
public static CryptoProtocolVersionProto valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private CryptoProtocolVersionProto(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.CryptoProtocolVersionProto)
}
/**
* Protobuf enum {@code hadoop.hdfs.ErasureCodingPolicyState}
*
*
**
* EC policy state.
*
*/
public enum ErasureCodingPolicyState
implements com.google.protobuf.ProtocolMessageEnum {
/**
* DISABLED = 1;
*/
DISABLED(0, 1),
/**
* ENABLED = 2;
*/
ENABLED(1, 2),
/**
* REMOVED = 3;
*/
REMOVED(2, 3),
;
/**
* DISABLED = 1;
*/
public static final int DISABLED_VALUE = 1;
/**
* ENABLED = 2;
*/
public static final int ENABLED_VALUE = 2;
/**
* REMOVED = 3;
*/
public static final int REMOVED_VALUE = 3;
public final int getNumber() { return value; }
public static ErasureCodingPolicyState valueOf(int value) {
switch (value) {
case 1: return DISABLED;
case 2: return ENABLED;
case 3: return REMOVED;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap() {
public ErasureCodingPolicyState findValueByNumber(int number) {
return ErasureCodingPolicyState.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(4);
}
private static final ErasureCodingPolicyState[] VALUES = values();
public static ErasureCodingPolicyState valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private ErasureCodingPolicyState(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.ErasureCodingPolicyState)
}
/**
* Protobuf enum {@code hadoop.hdfs.ChecksumTypeProto}
*
*
**
* Checksum algorithms/types used in HDFS
* Make sure this enum's integer values match enum values' id properties defined
* in org.apache.hadoop.util.DataChecksum.Type
*
*/
public enum ChecksumTypeProto
implements com.google.protobuf.ProtocolMessageEnum {
/**
* CHECKSUM_NULL = 0;
*/
CHECKSUM_NULL(0, 0),
/**
* CHECKSUM_CRC32 = 1;
*/
CHECKSUM_CRC32(1, 1),
/**
* CHECKSUM_CRC32C = 2;
*/
CHECKSUM_CRC32C(2, 2),
;
/**
* CHECKSUM_NULL = 0;
*/
public static final int CHECKSUM_NULL_VALUE = 0;
/**
* CHECKSUM_CRC32 = 1;
*/
public static final int CHECKSUM_CRC32_VALUE = 1;
/**
* CHECKSUM_CRC32C = 2;
*/
public static final int CHECKSUM_CRC32C_VALUE = 2;
public final int getNumber() { return value; }
public static ChecksumTypeProto valueOf(int value) {
switch (value) {
case 0: return CHECKSUM_NULL;
case 1: return CHECKSUM_CRC32;
case 2: return CHECKSUM_CRC32C;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap() {
public ChecksumTypeProto findValueByNumber(int number) {
return ChecksumTypeProto.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(5);
}
private static final ChecksumTypeProto[] VALUES = values();
public static ChecksumTypeProto valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private ChecksumTypeProto(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.ChecksumTypeProto)
}
/**
* Protobuf enum {@code hadoop.hdfs.BlockChecksumTypeProto}
*/
public enum BlockChecksumTypeProto
implements com.google.protobuf.ProtocolMessageEnum {
/**
* MD5CRC = 1;
*
*
* BlockChecksum obtained by taking the MD5 digest of chunk CRCs
*
*/
MD5CRC(0, 1),
/**
* COMPOSITE_CRC = 2;
*
*
* Chunk-independent CRC, optionally striped
*
*/
COMPOSITE_CRC(1, 2),
;
/**
* MD5CRC = 1;
*
*
* BlockChecksum obtained by taking the MD5 digest of chunk CRCs
*
*/
public static final int MD5CRC_VALUE = 1;
/**
* COMPOSITE_CRC = 2;
*
*
* Chunk-independent CRC, optionally striped
*
*/
public static final int COMPOSITE_CRC_VALUE = 2;
public final int getNumber() { return value; }
public static BlockChecksumTypeProto valueOf(int value) {
switch (value) {
case 1: return MD5CRC;
case 2: return COMPOSITE_CRC;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap() {
public BlockChecksumTypeProto findValueByNumber(int number) {
return BlockChecksumTypeProto.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(6);
}
private static final BlockChecksumTypeProto[] VALUES = values();
public static BlockChecksumTypeProto valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private BlockChecksumTypeProto(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.BlockChecksumTypeProto)
}
/**
* Protobuf enum {@code hadoop.hdfs.AccessModeProto}
*
*
**
* File access permissions mode.
*
*/
public enum AccessModeProto
implements com.google.protobuf.ProtocolMessageEnum {
/**
* READ = 1;
*/
READ(0, 1),
/**
* WRITE = 2;
*/
WRITE(1, 2),
/**
* COPY = 3;
*/
COPY(2, 3),
/**
* REPLACE = 4;
*/
REPLACE(3, 4),
;
/**
* READ = 1;
*/
public static final int READ_VALUE = 1;
/**
* WRITE = 2;
*/
public static final int WRITE_VALUE = 2;
/**
* COPY = 3;
*/
public static final int COPY_VALUE = 3;
/**
* REPLACE = 4;
*/
public static final int REPLACE_VALUE = 4;
public final int getNumber() { return value; }
public static AccessModeProto valueOf(int value) {
switch (value) {
case 1: return READ;
case 2: return WRITE;
case 3: return COPY;
case 4: return REPLACE;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap() {
public AccessModeProto findValueByNumber(int number) {
return AccessModeProto.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(7);
}
private static final AccessModeProto[] VALUES = values();
public static AccessModeProto valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private AccessModeProto(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.AccessModeProto)
}
public interface ExtendedBlockProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required string poolId = 1;
/**
* required string poolId = 1;
*
*
* Block pool id - gloablly unique across clusters
*
*/
boolean hasPoolId();
/**
* required string poolId = 1;
*
*
* Block pool id - gloablly unique across clusters
*
*/
java.lang.String getPoolId();
/**
* required string poolId = 1;
*
*
* Block pool id - gloablly unique across clusters
*
*/
com.google.protobuf.ByteString
getPoolIdBytes();
// required uint64 blockId = 2;
/**
* required uint64 blockId = 2;
*
*
* the local id within a pool
*
*/
boolean hasBlockId();
/**
* required uint64 blockId = 2;
*
*
* the local id within a pool
*
*/
long getBlockId();
// required uint64 generationStamp = 3;
/**
* required uint64 generationStamp = 3;
*/
boolean hasGenerationStamp();
/**
* required uint64 generationStamp = 3;
*/
long getGenerationStamp();
// optional uint64 numBytes = 4 [default = 0];
/**
* optional uint64 numBytes = 4 [default = 0];
*
*
* len does not belong in ebid
*
*/
boolean hasNumBytes();
/**
* optional uint64 numBytes = 4 [default = 0];
*
*
* len does not belong in ebid
*
*/
long getNumBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.ExtendedBlockProto}
*
*
**
* Extended block idenfies a block
*
*/
public static final class ExtendedBlockProto extends
com.google.protobuf.GeneratedMessage
implements ExtendedBlockProtoOrBuilder {
// Use ExtendedBlockProto.newBuilder() to construct.
private ExtendedBlockProto(com.google.protobuf.GeneratedMessage.Builder> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ExtendedBlockProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ExtendedBlockProto defaultInstance;
public static ExtendedBlockProto getDefaultInstance() {
return defaultInstance;
}
public ExtendedBlockProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ExtendedBlockProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
poolId_ = input.readBytes();
break;
}
case 16: {
bitField0_ |= 0x00000002;
blockId_ = input.readUInt64();
break;
}
case 24: {
bitField0_ |= 0x00000004;
generationStamp_ = input.readUInt64();
break;
}
case 32: {
bitField0_ |= 0x00000008;
numBytes_ = input.readUInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder.class);
}
public static com.google.protobuf.Parser PARSER =
new com.google.protobuf.AbstractParser() {
public ExtendedBlockProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ExtendedBlockProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser getParserForType() {
return PARSER;
}
private int bitField0_;
// required string poolId = 1;
public static final int POOLID_FIELD_NUMBER = 1;
private java.lang.Object poolId_;
/**
* required string poolId = 1;
*
*
* Block pool id - gloablly unique across clusters
*
*/
public boolean hasPoolId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string poolId = 1;
*
*
* Block pool id - gloablly unique across clusters
*
*/
public java.lang.String getPoolId() {
java.lang.Object ref = poolId_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
poolId_ = s;
}
return s;
}
}
/**
* required string poolId = 1;
*
*
* Block pool id - gloablly unique across clusters
*
*/
public com.google.protobuf.ByteString
getPoolIdBytes() {
java.lang.Object ref = poolId_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
poolId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required uint64 blockId = 2;
public static final int BLOCKID_FIELD_NUMBER = 2;
private long blockId_;
/**
* required uint64 blockId = 2;
*
*
* the local id within a pool
*
*/
public boolean hasBlockId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 blockId = 2;
*
*
* the local id within a pool
*
*/
public long getBlockId() {
return blockId_;
}
// required uint64 generationStamp = 3;
public static final int GENERATIONSTAMP_FIELD_NUMBER = 3;
private long generationStamp_;
/**
* required uint64 generationStamp = 3;
*/
public boolean hasGenerationStamp() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 generationStamp = 3;
*/
public long getGenerationStamp() {
return generationStamp_;
}
// optional uint64 numBytes = 4 [default = 0];
public static final int NUMBYTES_FIELD_NUMBER = 4;
private long numBytes_;
/**
* optional uint64 numBytes = 4 [default = 0];
*
*
* len does not belong in ebid
*
*/
public boolean hasNumBytes() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional uint64 numBytes = 4 [default = 0];
*
*
* len does not belong in ebid
*
*/
public long getNumBytes() {
return numBytes_;
}
private void initFields() {
poolId_ = "";
blockId_ = 0L;
generationStamp_ = 0L;
numBytes_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasPoolId()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasBlockId()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasGenerationStamp()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getPoolIdBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, blockId_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt64(3, generationStamp_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt64(4, numBytes_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getPoolIdBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, blockId_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, generationStamp_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(4, numBytes_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto) obj;
boolean result = true;
result = result && (hasPoolId() == other.hasPoolId());
if (hasPoolId()) {
result = result && getPoolId()
.equals(other.getPoolId());
}
result = result && (hasBlockId() == other.hasBlockId());
if (hasBlockId()) {
result = result && (getBlockId()
== other.getBlockId());
}
result = result && (hasGenerationStamp() == other.hasGenerationStamp());
if (hasGenerationStamp()) {
result = result && (getGenerationStamp()
== other.getGenerationStamp());
}
result = result && (hasNumBytes() == other.hasNumBytes());
if (hasNumBytes()) {
result = result && (getNumBytes()
== other.getNumBytes());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasPoolId()) {
hash = (37 * hash) + POOLID_FIELD_NUMBER;
hash = (53 * hash) + getPoolId().hashCode();
}
if (hasBlockId()) {
hash = (37 * hash) + BLOCKID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getBlockId());
}
if (hasGenerationStamp()) {
hash = (37 * hash) + GENERATIONSTAMP_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getGenerationStamp());
}
if (hasNumBytes()) {
hash = (37 * hash) + NUMBYTES_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getNumBytes());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Protobuf type {@code hadoop.hdfs.ExtendedBlockProto}
 *
 * <pre>
 **
 * Extended block idenfies a block
 * </pre>
 *
 * NOTE(review): this file is generated by the protocol buffer compiler
 * ("DO NOT EDIT"); the generic type arguments below were stripped by an
 * HTML scrape and have been restored to the protoc-2.5 output form.
 */
public static final class Builder extends
    com.google.protobuf.GeneratedMessage.Builder<Builder>
   implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder {
  public static final com.google.protobuf.Descriptors.Descriptor
      getDescriptor() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor;
  }

  protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder.class);
  }

  // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder()
  private Builder() {
    maybeForceBuilderInitialization();
  }

  private Builder(
      com.google.protobuf.GeneratedMessage.BuilderParent parent) {
    super(parent);
    maybeForceBuilderInitialization();
  }
  private void maybeForceBuilderInitialization() {
    if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
    }
  }
  private static Builder create() {
    return new Builder();
  }

  public Builder clear() {
    super.clear();
    poolId_ = "";
    bitField0_ = (bitField0_ & ~0x00000001);
    blockId_ = 0L;
    bitField0_ = (bitField0_ & ~0x00000002);
    generationStamp_ = 0L;
    bitField0_ = (bitField0_ & ~0x00000004);
    numBytes_ = 0L;
    bitField0_ = (bitField0_ & ~0x00000008);
    return this;
  }

  public Builder clone() {
    return create().mergeFrom(buildPartial());
  }

  public com.google.protobuf.Descriptors.Descriptor
      getDescriptorForType() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor;
  }

  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getDefaultInstanceForType() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
  }

  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto build() {
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto result = buildPartial();
    if (!result.isInitialized()) {
      throw newUninitializedMessageException(result);
    }
    return result;
  }

  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto buildPartial() {
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto(this);
    int from_bitField0_ = bitField0_;
    int to_bitField0_ = 0;
    if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
      to_bitField0_ |= 0x00000001;
    }
    result.poolId_ = poolId_;
    if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
      to_bitField0_ |= 0x00000002;
    }
    result.blockId_ = blockId_;
    if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
      to_bitField0_ |= 0x00000004;
    }
    result.generationStamp_ = generationStamp_;
    if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
      to_bitField0_ |= 0x00000008;
    }
    result.numBytes_ = numBytes_;
    result.bitField0_ = to_bitField0_;
    onBuilt();
    return result;
  }

  public Builder mergeFrom(com.google.protobuf.Message other) {
    if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto) {
      return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto)other);
    } else {
      super.mergeFrom(other);
      return this;
    }
  }

  public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto other) {
    if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) return this;
    if (other.hasPoolId()) {
      bitField0_ |= 0x00000001;
      poolId_ = other.poolId_;
      onChanged();
    }
    if (other.hasBlockId()) {
      setBlockId(other.getBlockId());
    }
    if (other.hasGenerationStamp()) {
      setGenerationStamp(other.getGenerationStamp());
    }
    if (other.hasNumBytes()) {
      setNumBytes(other.getNumBytes());
    }
    this.mergeUnknownFields(other.getUnknownFields());
    return this;
  }

  public final boolean isInitialized() {
    if (!hasPoolId()) {
      
      return false;
    }
    if (!hasBlockId()) {
      
      return false;
    }
    if (!hasGenerationStamp()) {
      
      return false;
    }
    return true;
  }

  public Builder mergeFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parsedMessage = null;
    try {
      parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto) e.getUnfinishedMessage();
      throw e;
    } finally {
      if (parsedMessage != null) {
        mergeFrom(parsedMessage);
      }
    }
    return this;
  }
  private int bitField0_;

  // required string poolId = 1;
  private java.lang.Object poolId_ = "";
  /**
   * <code>required string poolId = 1;</code>
   *
   * <pre>
   * Block pool id - gloablly unique across clusters
   * </pre>
   */
  public boolean hasPoolId() {
    return ((bitField0_ & 0x00000001) == 0x00000001);
  }
  /**
   * <code>required string poolId = 1;</code>
   *
   * <pre>
   * Block pool id - gloablly unique across clusters
   * </pre>
   */
  public java.lang.String getPoolId() {
    java.lang.Object ref = poolId_;
    if (!(ref instanceof java.lang.String)) {
      java.lang.String s = ((com.google.protobuf.ByteString) ref)
          .toStringUtf8();
      poolId_ = s;
      return s;
    } else {
      return (java.lang.String) ref;
    }
  }
  /**
   * <code>required string poolId = 1;</code>
   *
   * <pre>
   * Block pool id - gloablly unique across clusters
   * </pre>
   */
  public com.google.protobuf.ByteString
      getPoolIdBytes() {
    java.lang.Object ref = poolId_;
    if (ref instanceof String) {
      com.google.protobuf.ByteString b = 
          com.google.protobuf.ByteString.copyFromUtf8(
              (java.lang.String) ref);
      poolId_ = b;
      return b;
    } else {
      return (com.google.protobuf.ByteString) ref;
    }
  }
  /**
   * <code>required string poolId = 1;</code>
   *
   * <pre>
   * Block pool id - gloablly unique across clusters
   * </pre>
   */
  public Builder setPoolId(
      java.lang.String value) {
    if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000001;
    poolId_ = value;
    onChanged();
    return this;
  }
  /**
   * <code>required string poolId = 1;</code>
   *
   * <pre>
   * Block pool id - gloablly unique across clusters
   * </pre>
   */
  public Builder clearPoolId() {
    bitField0_ = (bitField0_ & ~0x00000001);
    poolId_ = getDefaultInstance().getPoolId();
    onChanged();
    return this;
  }
  /**
   * <code>required string poolId = 1;</code>
   *
   * <pre>
   * Block pool id - gloablly unique across clusters
   * </pre>
   */
  public Builder setPoolIdBytes(
      com.google.protobuf.ByteString value) {
    if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000001;
    poolId_ = value;
    onChanged();
    return this;
  }

  // required uint64 blockId = 2;
  private long blockId_ ;
  /**
   * <code>required uint64 blockId = 2;</code>
   *
   * <pre>
   * the local id within a pool
   * </pre>
   */
  public boolean hasBlockId() {
    return ((bitField0_ & 0x00000002) == 0x00000002);
  }
  /**
   * <code>required uint64 blockId = 2;</code>
   *
   * <pre>
   * the local id within a pool
   * </pre>
   */
  public long getBlockId() {
    return blockId_;
  }
  /**
   * <code>required uint64 blockId = 2;</code>
   *
   * <pre>
   * the local id within a pool
   * </pre>
   */
  public Builder setBlockId(long value) {
    bitField0_ |= 0x00000002;
    blockId_ = value;
    onChanged();
    return this;
  }
  /**
   * <code>required uint64 blockId = 2;</code>
   *
   * <pre>
   * the local id within a pool
   * </pre>
   */
  public Builder clearBlockId() {
    bitField0_ = (bitField0_ & ~0x00000002);
    blockId_ = 0L;
    onChanged();
    return this;
  }

  // required uint64 generationStamp = 3;
  private long generationStamp_ ;
  /**
   * <code>required uint64 generationStamp = 3;</code>
   */
  public boolean hasGenerationStamp() {
    return ((bitField0_ & 0x00000004) == 0x00000004);
  }
  /**
   * <code>required uint64 generationStamp = 3;</code>
   */
  public long getGenerationStamp() {
    return generationStamp_;
  }
  /**
   * <code>required uint64 generationStamp = 3;</code>
   */
  public Builder setGenerationStamp(long value) {
    bitField0_ |= 0x00000004;
    generationStamp_ = value;
    onChanged();
    return this;
  }
  /**
   * <code>required uint64 generationStamp = 3;</code>
   */
  public Builder clearGenerationStamp() {
    bitField0_ = (bitField0_ & ~0x00000004);
    generationStamp_ = 0L;
    onChanged();
    return this;
  }

  // optional uint64 numBytes = 4 [default = 0];
  private long numBytes_ ;
  /**
   * <code>optional uint64 numBytes = 4 [default = 0];</code>
   *
   * <pre>
   * len does not belong in ebid
   * </pre>
   */
  public boolean hasNumBytes() {
    return ((bitField0_ & 0x00000008) == 0x00000008);
  }
  /**
   * <code>optional uint64 numBytes = 4 [default = 0];</code>
   *
   * <pre>
   * len does not belong in ebid
   * </pre>
   */
  public long getNumBytes() {
    return numBytes_;
  }
  /**
   * <code>optional uint64 numBytes = 4 [default = 0];</code>
   *
   * <pre>
   * len does not belong in ebid
   * </pre>
   */
  public Builder setNumBytes(long value) {
    bitField0_ |= 0x00000008;
    numBytes_ = value;
    onChanged();
    return this;
  }
  /**
   * <code>optional uint64 numBytes = 4 [default = 0];</code>
   *
   * <pre>
   * len does not belong in ebid
   * </pre>
   */
  public Builder clearNumBytes() {
    bitField0_ = (bitField0_ & ~0x00000008);
    numBytes_ = 0L;
    onChanged();
    return this;
  }

  // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ExtendedBlockProto)
}
// Eagerly build the singleton default (empty) instance handed out by
// getDefaultInstance(); initFields() resets every field to its
// proto-declared default before first use.
static {
  defaultInstance = new ExtendedBlockProto(true);
  defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ExtendedBlockProto)
}
public interface ProvidedStorageLocationProtoOrBuilder
    extends com.google.protobuf.MessageOrBuilder {

  // required string path = 1;
  /**
   * <code>required string path = 1;</code>
   */
  boolean hasPath();
  /**
   * <code>required string path = 1;</code>
   */
  java.lang.String getPath();
  /**
   * <code>required string path = 1;</code>
   */
  com.google.protobuf.ByteString
      getPathBytes();

  // required int64 offset = 2;
  /**
   * <code>required int64 offset = 2;</code>
   */
  boolean hasOffset();
  /**
   * <code>required int64 offset = 2;</code>
   */
  long getOffset();

  // required int64 length = 3;
  /**
   * <code>required int64 length = 3;</code>
   */
  boolean hasLength();
  /**
   * <code>required int64 length = 3;</code>
   */
  long getLength();

  // required bytes nonce = 4;
  /**
   * <code>required bytes nonce = 4;</code>
   */
  boolean hasNonce();
  /**
   * <code>required bytes nonce = 4;</code>
   */
  com.google.protobuf.ByteString getNonce();
}
/**
* Protobuf type {@code hadoop.hdfs.ProvidedStorageLocationProto}
*/
public static final class ProvidedStorageLocationProto extends
com.google.protobuf.GeneratedMessage
implements ProvidedStorageLocationProtoOrBuilder {
// Use ProvidedStorageLocationProto.newBuilder() to construct.
private ProvidedStorageLocationProto(com.google.protobuf.GeneratedMessage.Builder> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ProvidedStorageLocationProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ProvidedStorageLocationProto defaultInstance;
public static ProvidedStorageLocationProto getDefaultInstance() {
return defaultInstance;
}
public ProvidedStorageLocationProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ProvidedStorageLocationProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
path_ = input.readBytes();
break;
}
case 16: {
bitField0_ |= 0x00000002;
offset_ = input.readInt64();
break;
}
case 24: {
bitField0_ |= 0x00000004;
length_ = input.readInt64();
break;
}
case 34: {
bitField0_ |= 0x00000008;
nonce_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ProvidedStorageLocationProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ProvidedStorageLocationProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.Builder.class);
}
public static com.google.protobuf.Parser PARSER =
new com.google.protobuf.AbstractParser() {
public ProvidedStorageLocationProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ProvidedStorageLocationProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser getParserForType() {
return PARSER;
}
private int bitField0_;
// required string path = 1;
public static final int PATH_FIELD_NUMBER = 1;
private java.lang.Object path_;
/**
* required string path = 1;
*/
public boolean hasPath() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string path = 1;
*/
public java.lang.String getPath() {
java.lang.Object ref = path_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
path_ = s;
}
return s;
}
}
/**
* required string path = 1;
*/
public com.google.protobuf.ByteString
getPathBytes() {
java.lang.Object ref = path_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
path_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required int64 offset = 2;
public static final int OFFSET_FIELD_NUMBER = 2;
private long offset_;
/**
* required int64 offset = 2;
*/
public boolean hasOffset() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required int64 offset = 2;
*/
public long getOffset() {
return offset_;
}
// required int64 length = 3;
public static final int LENGTH_FIELD_NUMBER = 3;
private long length_;
/**
* required int64 length = 3;
*/
public boolean hasLength() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required int64 length = 3;
*/
public long getLength() {
return length_;
}
// required bytes nonce = 4;
public static final int NONCE_FIELD_NUMBER = 4;
private com.google.protobuf.ByteString nonce_;
/**
* required bytes nonce = 4;
*/
public boolean hasNonce() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required bytes nonce = 4;
*/
public com.google.protobuf.ByteString getNonce() {
return nonce_;
}
private void initFields() {
path_ = "";
offset_ = 0L;
length_ = 0L;
nonce_ = com.google.protobuf.ByteString.EMPTY;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasPath()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasOffset()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasLength()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasNonce()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getPathBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeInt64(2, offset_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeInt64(3, length_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeBytes(4, nonce_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getPathBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(2, offset_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(3, length_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(4, nonce_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto) obj;
boolean result = true;
result = result && (hasPath() == other.hasPath());
if (hasPath()) {
result = result && getPath()
.equals(other.getPath());
}
result = result && (hasOffset() == other.hasOffset());
if (hasOffset()) {
result = result && (getOffset()
== other.getOffset());
}
result = result && (hasLength() == other.hasLength());
if (hasLength()) {
result = result && (getLength()
== other.getLength());
}
result = result && (hasNonce() == other.hasNonce());
if (hasNonce()) {
result = result && getNonce()
.equals(other.getNonce());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasPath()) {
hash = (37 * hash) + PATH_FIELD_NUMBER;
hash = (53 * hash) + getPath().hashCode();
}
if (hasOffset()) {
hash = (37 * hash) + OFFSET_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getOffset());
}
if (hasLength()) {
hash = (37 * hash) + LENGTH_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getLength());
}
if (hasNonce()) {
hash = (37 * hash) + NONCE_FIELD_NUMBER;
hash = (53 * hash) + getNonce().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ProvidedStorageLocationProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ProvidedStorageLocationProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ProvidedStorageLocationProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
path_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
offset_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
length_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
nonce_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ProvidedStorageLocationProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.path_ = path_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.offset_ = offset_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.length_ = length_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.nonce_ = nonce_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.getDefaultInstance()) return this;
if (other.hasPath()) {
bitField0_ |= 0x00000001;
path_ = other.path_;
onChanged();
}
if (other.hasOffset()) {
setOffset(other.getOffset());
}
if (other.hasLength()) {
setLength(other.getLength());
}
if (other.hasNonce()) {
setNonce(other.getNonce());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasPath()) {
return false;
}
if (!hasOffset()) {
return false;
}
if (!hasLength()) {
return false;
}
if (!hasNonce()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string path = 1;
private java.lang.Object path_ = "";
/**
* required string path = 1;
*/
public boolean hasPath() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string path = 1;
*/
public java.lang.String getPath() {
java.lang.Object ref = path_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
path_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string path = 1;
*/
public com.google.protobuf.ByteString
getPathBytes() {
java.lang.Object ref = path_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
path_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string path = 1;
*/
public Builder setPath(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
path_ = value;
onChanged();
return this;
}
/**
* required string path = 1;
*/
public Builder clearPath() {
bitField0_ = (bitField0_ & ~0x00000001);
path_ = getDefaultInstance().getPath();
onChanged();
return this;
}
/**
* required string path = 1;
*/
public Builder setPathBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
path_ = value;
onChanged();
return this;
}
// required int64 offset = 2;
private long offset_ ;
/**
* required int64 offset = 2;
*/
public boolean hasOffset() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required int64 offset = 2;
*/
public long getOffset() {
return offset_;
}
/**
* required int64 offset = 2;
*/
public Builder setOffset(long value) {
bitField0_ |= 0x00000002;
offset_ = value;
onChanged();
return this;
}
/**
* required int64 offset = 2;
*/
public Builder clearOffset() {
bitField0_ = (bitField0_ & ~0x00000002);
offset_ = 0L;
onChanged();
return this;
}
// required int64 length = 3;
private long length_ ;
/**
* required int64 length = 3;
*/
public boolean hasLength() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required int64 length = 3;
*/
public long getLength() {
return length_;
}
/**
* required int64 length = 3;
*/
public Builder setLength(long value) {
bitField0_ |= 0x00000004;
length_ = value;
onChanged();
return this;
}
/**
* required int64 length = 3;
*/
public Builder clearLength() {
bitField0_ = (bitField0_ & ~0x00000004);
length_ = 0L;
onChanged();
return this;
}
// required bytes nonce = 4;
private com.google.protobuf.ByteString nonce_ = com.google.protobuf.ByteString.EMPTY;
/**
* required bytes nonce = 4;
*/
public boolean hasNonce() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required bytes nonce = 4;
*/
public com.google.protobuf.ByteString getNonce() {
return nonce_;
}
/**
* required bytes nonce = 4;
*/
public Builder setNonce(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
nonce_ = value;
onChanged();
return this;
}
/**
* required bytes nonce = 4;
*/
public Builder clearNonce() {
bitField0_ = (bitField0_ & ~0x00000008);
nonce_ = getDefaultInstance().getNonce();
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ProvidedStorageLocationProto)
}
static {
defaultInstance = new ProvidedStorageLocationProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ProvidedStorageLocationProto)
}
public interface DatanodeIDProtoOrBuilder
    extends com.google.protobuf.MessageOrBuilder {

  // required string ipAddr = 1;
  /**
   * <code>required string ipAddr = 1;</code>
   *
   * <pre>
   * IP address
   * </pre>
   */
  boolean hasIpAddr();
  /**
   * <code>required string ipAddr = 1;</code>
   *
   * <pre>
   * IP address
   * </pre>
   */
  java.lang.String getIpAddr();
  /**
   * <code>required string ipAddr = 1;</code>
   *
   * <pre>
   * IP address
   * </pre>
   */
  com.google.protobuf.ByteString
      getIpAddrBytes();

  // required string hostName = 2;
  /**
   * <code>required string hostName = 2;</code>
   *
   * <pre>
   * hostname
   * </pre>
   */
  boolean hasHostName();
  /**
   * <code>required string hostName = 2;</code>
   *
   * <pre>
   * hostname
   * </pre>
   */
  java.lang.String getHostName();
  /**
   * <code>required string hostName = 2;</code>
   *
   * <pre>
   * hostname
   * </pre>
   */
  com.google.protobuf.ByteString
      getHostNameBytes();

  // required string datanodeUuid = 3;
  /**
   * <code>required string datanodeUuid = 3;</code>
   *
   * <pre>
   * UUID assigned to the Datanode. For
   * </pre>
   */
  boolean hasDatanodeUuid();
  /**
   * <code>required string datanodeUuid = 3;</code>
   *
   * <pre>
   * UUID assigned to the Datanode. For
   * </pre>
   */
  java.lang.String getDatanodeUuid();
  /**
   * <code>required string datanodeUuid = 3;</code>
   *
   * <pre>
   * UUID assigned to the Datanode. For
   * </pre>
   */
  com.google.protobuf.ByteString
      getDatanodeUuidBytes();

  // required uint32 xferPort = 4;
  /**
   * <code>required uint32 xferPort = 4;</code>
   *
   * <pre>
   * upgraded clusters this is the same
   * as the original StorageID of the
   * Datanode.
   * </pre>
   */
  boolean hasXferPort();
  /**
   * <code>required uint32 xferPort = 4;</code>
   *
   * <pre>
   * upgraded clusters this is the same
   * as the original StorageID of the
   * Datanode.
   * </pre>
   */
  int getXferPort();

  // required uint32 infoPort = 5;
  /**
   * <code>required uint32 infoPort = 5;</code>
   *
   * <pre>
   * datanode http port
   * </pre>
   */
  boolean hasInfoPort();
  /**
   * <code>required uint32 infoPort = 5;</code>
   *
   * <pre>
   * datanode http port
   * </pre>
   */
  int getInfoPort();

  // required uint32 ipcPort = 6;
  /**
   * <code>required uint32 ipcPort = 6;</code>
   *
   * <pre>
   * ipc server port
   * </pre>
   */
  boolean hasIpcPort();
  /**
   * <code>required uint32 ipcPort = 6;</code>
   *
   * <pre>
   * ipc server port
   * </pre>
   */
  int getIpcPort();

  // optional uint32 infoSecurePort = 7 [default = 0];
  /**
   * <code>optional uint32 infoSecurePort = 7 [default = 0];</code>
   *
   * <pre>
   * datanode https port
   * </pre>
   */
  boolean hasInfoSecurePort();
  /**
   * <code>optional uint32 infoSecurePort = 7 [default = 0];</code>
   *
   * <pre>
   * datanode https port
   * </pre>
   */
  int getInfoSecurePort();
}
/**
* Protobuf type {@code hadoop.hdfs.DatanodeIDProto}
*
*
**
* Identifies a Datanode
*
*/
public static final class DatanodeIDProto extends
com.google.protobuf.GeneratedMessage
implements DatanodeIDProtoOrBuilder {
// Use DatanodeIDProto.newBuilder() to construct.
private DatanodeIDProto(com.google.protobuf.GeneratedMessage.Builder> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private DatanodeIDProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final DatanodeIDProto defaultInstance;
public static DatanodeIDProto getDefaultInstance() {
return defaultInstance;
}
public DatanodeIDProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DatanodeIDProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
ipAddr_ = input.readBytes();
break;
}
case 18: {
bitField0_ |= 0x00000002;
hostName_ = input.readBytes();
break;
}
case 26: {
bitField0_ |= 0x00000004;
datanodeUuid_ = input.readBytes();
break;
}
case 32: {
bitField0_ |= 0x00000008;
xferPort_ = input.readUInt32();
break;
}
case 40: {
bitField0_ |= 0x00000010;
infoPort_ = input.readUInt32();
break;
}
case 48: {
bitField0_ |= 0x00000020;
ipcPort_ = input.readUInt32();
break;
}
case 56: {
bitField0_ |= 0x00000040;
infoSecurePort_ = input.readUInt32();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder.class);
}
public static com.google.protobuf.Parser PARSER =
new com.google.protobuf.AbstractParser() {
public DatanodeIDProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new DatanodeIDProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser getParserForType() {
return PARSER;
}
private int bitField0_;
// required string ipAddr = 1;
public static final int IPADDR_FIELD_NUMBER = 1;
private java.lang.Object ipAddr_;
/**
* required string ipAddr = 1;
*
*
* IP address
*
*/
public boolean hasIpAddr() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string ipAddr = 1;
*
*
* IP address
*
*/
public java.lang.String getIpAddr() {
java.lang.Object ref = ipAddr_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
ipAddr_ = s;
}
return s;
}
}
/**
* required string ipAddr = 1;
*
*
* IP address
*
*/
public com.google.protobuf.ByteString
getIpAddrBytes() {
java.lang.Object ref = ipAddr_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
ipAddr_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required string hostName = 2;
public static final int HOSTNAME_FIELD_NUMBER = 2;
private java.lang.Object hostName_;
/**
* required string hostName = 2;
*
*
* hostname
*
*/
public boolean hasHostName() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string hostName = 2;
*
*
* hostname
*
*/
public java.lang.String getHostName() {
java.lang.Object ref = hostName_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
hostName_ = s;
}
return s;
}
}
/**
* required string hostName = 2;
*
*
* hostname
*
*/
public com.google.protobuf.ByteString
getHostNameBytes() {
java.lang.Object ref = hostName_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
hostName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required string datanodeUuid = 3;
public static final int DATANODEUUID_FIELD_NUMBER = 3;
private java.lang.Object datanodeUuid_;
/**
* required string datanodeUuid = 3;
*
*
* UUID assigned to the Datanode. For
*
*/
public boolean hasDatanodeUuid() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required string datanodeUuid = 3;
*
*
* UUID assigned to the Datanode. For
*
*/
public java.lang.String getDatanodeUuid() {
java.lang.Object ref = datanodeUuid_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
datanodeUuid_ = s;
}
return s;
}
}
/**
* required string datanodeUuid = 3;
*
*
* UUID assigned to the Datanode. For
*
*/
public com.google.protobuf.ByteString
getDatanodeUuidBytes() {
java.lang.Object ref = datanodeUuid_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
datanodeUuid_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required uint32 xferPort = 4;
public static final int XFERPORT_FIELD_NUMBER = 4;
private int xferPort_;
/**
* required uint32 xferPort = 4;
*
*
* upgraded clusters this is the same
* as the original StorageID of the
* Datanode.
*
*/
public boolean hasXferPort() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required uint32 xferPort = 4;
*
*
* upgraded clusters this is the same
* as the original StorageID of the
* Datanode.
*
*/
public int getXferPort() {
return xferPort_;
}
// required uint32 infoPort = 5;
public static final int INFOPORT_FIELD_NUMBER = 5;
private int infoPort_;
/**
* required uint32 infoPort = 5;
*
*
* datanode http port
*
*/
public boolean hasInfoPort() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required uint32 infoPort = 5;
*
*
* datanode http port
*
*/
public int getInfoPort() {
return infoPort_;
}
// required uint32 ipcPort = 6;
public static final int IPCPORT_FIELD_NUMBER = 6;
private int ipcPort_;
/**
* required uint32 ipcPort = 6;
*
*
* ipc server port
*
*/
public boolean hasIpcPort() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* required uint32 ipcPort = 6;
*
*
* ipc server port
*
*/
public int getIpcPort() {
return ipcPort_;
}
// optional uint32 infoSecurePort = 7 [default = 0];
public static final int INFOSECUREPORT_FIELD_NUMBER = 7;
private int infoSecurePort_;
/**
* optional uint32 infoSecurePort = 7 [default = 0];
*
*
* datanode https port
*
*/
public boolean hasInfoSecurePort() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional uint32 infoSecurePort = 7 [default = 0];
*
*
* datanode https port
*
*/
public int getInfoSecurePort() {
return infoSecurePort_;
}
private void initFields() {
ipAddr_ = "";
hostName_ = "";
datanodeUuid_ = "";
xferPort_ = 0;
infoPort_ = 0;
ipcPort_ = 0;
infoSecurePort_ = 0;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasIpAddr()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasHostName()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasDatanodeUuid()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasXferPort()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasInfoPort()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasIpcPort()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getIpAddrBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getHostNameBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, getDatanodeUuidBytes());
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt32(4, xferPort_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeUInt32(5, infoPort_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeUInt32(6, ipcPort_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeUInt32(7, infoSecurePort_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getIpAddrBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getHostNameBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, getDatanodeUuidBytes());
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(4, xferPort_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(5, infoPort_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(6, ipcPort_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(7, infoSecurePort_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto) obj;
boolean result = true;
result = result && (hasIpAddr() == other.hasIpAddr());
if (hasIpAddr()) {
result = result && getIpAddr()
.equals(other.getIpAddr());
}
result = result && (hasHostName() == other.hasHostName());
if (hasHostName()) {
result = result && getHostName()
.equals(other.getHostName());
}
result = result && (hasDatanodeUuid() == other.hasDatanodeUuid());
if (hasDatanodeUuid()) {
result = result && getDatanodeUuid()
.equals(other.getDatanodeUuid());
}
result = result && (hasXferPort() == other.hasXferPort());
if (hasXferPort()) {
result = result && (getXferPort()
== other.getXferPort());
}
result = result && (hasInfoPort() == other.hasInfoPort());
if (hasInfoPort()) {
result = result && (getInfoPort()
== other.getInfoPort());
}
result = result && (hasIpcPort() == other.hasIpcPort());
if (hasIpcPort()) {
result = result && (getIpcPort()
== other.getIpcPort());
}
result = result && (hasInfoSecurePort() == other.hasInfoSecurePort());
if (hasInfoSecurePort()) {
result = result && (getInfoSecurePort()
== other.getInfoSecurePort());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasIpAddr()) {
hash = (37 * hash) + IPADDR_FIELD_NUMBER;
hash = (53 * hash) + getIpAddr().hashCode();
}
if (hasHostName()) {
hash = (37 * hash) + HOSTNAME_FIELD_NUMBER;
hash = (53 * hash) + getHostName().hashCode();
}
if (hasDatanodeUuid()) {
hash = (37 * hash) + DATANODEUUID_FIELD_NUMBER;
hash = (53 * hash) + getDatanodeUuid().hashCode();
}
if (hasXferPort()) {
hash = (37 * hash) + XFERPORT_FIELD_NUMBER;
hash = (53 * hash) + getXferPort();
}
if (hasInfoPort()) {
hash = (37 * hash) + INFOPORT_FIELD_NUMBER;
hash = (53 * hash) + getInfoPort();
}
if (hasIpcPort()) {
hash = (37 * hash) + IPCPORT_FIELD_NUMBER;
hash = (53 * hash) + getIpcPort();
}
if (hasInfoSecurePort()) {
hash = (37 * hash) + INFOSECUREPORT_FIELD_NUMBER;
hash = (53 * hash) + getInfoSecurePort();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.DatanodeIDProto}
*
*
**
* Identifies a Datanode
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
ipAddr_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
hostName_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
datanodeUuid_ = "";
bitField0_ = (bitField0_ & ~0x00000004);
xferPort_ = 0;
bitField0_ = (bitField0_ & ~0x00000008);
infoPort_ = 0;
bitField0_ = (bitField0_ & ~0x00000010);
ipcPort_ = 0;
bitField0_ = (bitField0_ & ~0x00000020);
infoSecurePort_ = 0;
bitField0_ = (bitField0_ & ~0x00000040);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.ipAddr_ = ipAddr_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.hostName_ = hostName_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.datanodeUuid_ = datanodeUuid_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.xferPort_ = xferPort_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.infoPort_ = infoPort_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000020;
}
result.ipcPort_ = ipcPort_;
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000040;
}
result.infoSecurePort_ = infoSecurePort_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()) return this;
if (other.hasIpAddr()) {
bitField0_ |= 0x00000001;
ipAddr_ = other.ipAddr_;
onChanged();
}
if (other.hasHostName()) {
bitField0_ |= 0x00000002;
hostName_ = other.hostName_;
onChanged();
}
if (other.hasDatanodeUuid()) {
bitField0_ |= 0x00000004;
datanodeUuid_ = other.datanodeUuid_;
onChanged();
}
if (other.hasXferPort()) {
setXferPort(other.getXferPort());
}
if (other.hasInfoPort()) {
setInfoPort(other.getInfoPort());
}
if (other.hasIpcPort()) {
setIpcPort(other.getIpcPort());
}
if (other.hasInfoSecurePort()) {
setInfoSecurePort(other.getInfoSecurePort());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasIpAddr()) {
return false;
}
if (!hasHostName()) {
return false;
}
if (!hasDatanodeUuid()) {
return false;
}
if (!hasXferPort()) {
return false;
}
if (!hasInfoPort()) {
return false;
}
if (!hasIpcPort()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string ipAddr = 1;
private java.lang.Object ipAddr_ = "";
/**
* required string ipAddr = 1;
*
*
* IP address
*
*/
public boolean hasIpAddr() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string ipAddr = 1;
*
*
* IP address
*
*/
public java.lang.String getIpAddr() {
java.lang.Object ref = ipAddr_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
ipAddr_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string ipAddr = 1;
*
*
* IP address
*
*/
public com.google.protobuf.ByteString
getIpAddrBytes() {
java.lang.Object ref = ipAddr_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
ipAddr_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string ipAddr = 1;
*
*
* IP address
*
*/
public Builder setIpAddr(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
ipAddr_ = value;
onChanged();
return this;
}
/**
* required string ipAddr = 1;
*
*
* IP address
*
*/
public Builder clearIpAddr() {
bitField0_ = (bitField0_ & ~0x00000001);
ipAddr_ = getDefaultInstance().getIpAddr();
onChanged();
return this;
}
/**
* required string ipAddr = 1;
*
*
* IP address
*
*/
public Builder setIpAddrBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
ipAddr_ = value;
onChanged();
return this;
}
// required string hostName = 2;
private java.lang.Object hostName_ = "";
/**
* required string hostName = 2;
*
*
* hostname
*
*/
public boolean hasHostName() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string hostName = 2;
*
*
* hostname
*
*/
public java.lang.String getHostName() {
java.lang.Object ref = hostName_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
hostName_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string hostName = 2;
*
*
* hostname
*
*/
public com.google.protobuf.ByteString
getHostNameBytes() {
java.lang.Object ref = hostName_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
hostName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string hostName = 2;
*
*
* hostname
*
*/
public Builder setHostName(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
hostName_ = value;
onChanged();
return this;
}
/**
* required string hostName = 2;
*
*
* hostname
*
*/
public Builder clearHostName() {
bitField0_ = (bitField0_ & ~0x00000002);
hostName_ = getDefaultInstance().getHostName();
onChanged();
return this;
}
/**
* required string hostName = 2;
*
*
* hostname
*
*/
public Builder setHostNameBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
hostName_ = value;
onChanged();
return this;
}
// required string datanodeUuid = 3;
private java.lang.Object datanodeUuid_ = "";
/**
* required string datanodeUuid = 3;
*
*
* UUID assigned to the Datanode. For
*
*/
public boolean hasDatanodeUuid() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required string datanodeUuid = 3;
*
*
* UUID assigned to the Datanode. For
*
*/
public java.lang.String getDatanodeUuid() {
java.lang.Object ref = datanodeUuid_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
datanodeUuid_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string datanodeUuid = 3;
*
*
* UUID assigned to the Datanode. For
*
*/
public com.google.protobuf.ByteString
getDatanodeUuidBytes() {
java.lang.Object ref = datanodeUuid_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
datanodeUuid_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string datanodeUuid = 3;
*
*
* UUID assigned to the Datanode. For
*
*/
public Builder setDatanodeUuid(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
datanodeUuid_ = value;
onChanged();
return this;
}
/**
* required string datanodeUuid = 3;
*
*
* UUID assigned to the Datanode. For
*
*/
public Builder clearDatanodeUuid() {
bitField0_ = (bitField0_ & ~0x00000004);
datanodeUuid_ = getDefaultInstance().getDatanodeUuid();
onChanged();
return this;
}
/**
* required string datanodeUuid = 3;
*
*
* UUID assigned to the Datanode. For
*
*/
public Builder setDatanodeUuidBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
datanodeUuid_ = value;
onChanged();
return this;
}
// required uint32 xferPort = 4;
private int xferPort_ ;
/**
* required uint32 xferPort = 4;
*
*
* upgraded clusters this is the same
* as the original StorageID of the
* Datanode.
*
*/
public boolean hasXferPort() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required uint32 xferPort = 4;
*
*
* upgraded clusters this is the same
* as the original StorageID of the
* Datanode.
*
*/
public int getXferPort() {
return xferPort_;
}
/**
* required uint32 xferPort = 4;
*
*
* upgraded clusters this is the same
* as the original StorageID of the
* Datanode.
*
*/
public Builder setXferPort(int value) {
bitField0_ |= 0x00000008;
xferPort_ = value;
onChanged();
return this;
}
/**
* required uint32 xferPort = 4;
*
*
* upgraded clusters this is the same
* as the original StorageID of the
* Datanode.
*
*/
public Builder clearXferPort() {
bitField0_ = (bitField0_ & ~0x00000008);
xferPort_ = 0;
onChanged();
return this;
}
// required uint32 infoPort = 5;
private int infoPort_ ;
/**
* required uint32 infoPort = 5;
*
*
* datanode http port
*
*/
public boolean hasInfoPort() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required uint32 infoPort = 5;
*
*
* datanode http port
*
*/
public int getInfoPort() {
return infoPort_;
}
/**
* required uint32 infoPort = 5;
*
*
* datanode http port
*
*/
public Builder setInfoPort(int value) {
bitField0_ |= 0x00000010;
infoPort_ = value;
onChanged();
return this;
}
/**
* required uint32 infoPort = 5;
*
*
* datanode http port
*
*/
public Builder clearInfoPort() {
bitField0_ = (bitField0_ & ~0x00000010);
infoPort_ = 0;
onChanged();
return this;
}
// required uint32 ipcPort = 6;
private int ipcPort_ ;
/**
* required uint32 ipcPort = 6;
*
*
* ipc server port
*
*/
public boolean hasIpcPort() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* required uint32 ipcPort = 6;
*
*
* ipc server port
*
*/
public int getIpcPort() {
return ipcPort_;
}
/**
* required uint32 ipcPort = 6;
*
*
* ipc server port
*
*/
public Builder setIpcPort(int value) {
bitField0_ |= 0x00000020;
ipcPort_ = value;
onChanged();
return this;
}
/**
* required uint32 ipcPort = 6;
*
*
* ipc server port
*
*/
public Builder clearIpcPort() {
bitField0_ = (bitField0_ & ~0x00000020);
ipcPort_ = 0;
onChanged();
return this;
}
// optional uint32 infoSecurePort = 7 [default = 0];
private int infoSecurePort_ ;
/**
* optional uint32 infoSecurePort = 7 [default = 0];
*
*
* datanode https port
*
*/
public boolean hasInfoSecurePort() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional uint32 infoSecurePort = 7 [default = 0];
*
*
* datanode https port
*
*/
public int getInfoSecurePort() {
return infoSecurePort_;
}
/**
* optional uint32 infoSecurePort = 7 [default = 0];
*
*
* datanode https port
*
*/
public Builder setInfoSecurePort(int value) {
bitField0_ |= 0x00000040;
infoSecurePort_ = value;
onChanged();
return this;
}
/**
* optional uint32 infoSecurePort = 7 [default = 0];
*
*
* datanode https port
*
*/
public Builder clearInfoSecurePort() {
bitField0_ = (bitField0_ & ~0x00000040);
infoSecurePort_ = 0;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeIDProto)
}
static {
defaultInstance = new DatanodeIDProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeIDProto)
}
public interface DatanodeLocalInfoProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required string softwareVersion = 1;
/**
 * <code>required string softwareVersion = 1;</code>
 */
boolean hasSoftwareVersion();
/**
 * <code>required string softwareVersion = 1;</code>
 */
java.lang.String getSoftwareVersion();
/**
 * <code>required string softwareVersion = 1;</code>
 */
com.google.protobuf.ByteString
getSoftwareVersionBytes();
// required string configVersion = 2;
/**
 * <code>required string configVersion = 2;</code>
 */
boolean hasConfigVersion();
/**
 * <code>required string configVersion = 2;</code>
 */
java.lang.String getConfigVersion();
/**
 * <code>required string configVersion = 2;</code>
 */
com.google.protobuf.ByteString
getConfigVersionBytes();
// required uint64 uptime = 3;
/**
 * <code>required uint64 uptime = 3;</code>
 */
boolean hasUptime();
/**
 * <code>required uint64 uptime = 3;</code>
 */
long getUptime();
}
/**
* Protobuf type {@code hadoop.hdfs.DatanodeLocalInfoProto}
*
*
**
* Datanode local information
*
*/
public static final class DatanodeLocalInfoProto extends
com.google.protobuf.GeneratedMessage
implements DatanodeLocalInfoProtoOrBuilder {
// Use DatanodeLocalInfoProto.newBuilder() to construct.
private DatanodeLocalInfoProto(com.google.protobuf.GeneratedMessage.Builder> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private DatanodeLocalInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final DatanodeLocalInfoProto defaultInstance;
public static DatanodeLocalInfoProto getDefaultInstance() {
return defaultInstance;
}
public DatanodeLocalInfoProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DatanodeLocalInfoProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
softwareVersion_ = input.readBytes();
break;
}
case 18: {
bitField0_ |= 0x00000002;
configVersion_ = input.readBytes();
break;
}
case 24: {
bitField0_ |= 0x00000004;
uptime_ = input.readUInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder.class);
}
public static com.google.protobuf.Parser PARSER =
new com.google.protobuf.AbstractParser() {
public DatanodeLocalInfoProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new DatanodeLocalInfoProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser getParserForType() {
return PARSER;
}
private int bitField0_;
// required string softwareVersion = 1;
public static final int SOFTWAREVERSION_FIELD_NUMBER = 1;
private java.lang.Object softwareVersion_;
/**
* required string softwareVersion = 1;
*/
public boolean hasSoftwareVersion() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string softwareVersion = 1;
*/
public java.lang.String getSoftwareVersion() {
java.lang.Object ref = softwareVersion_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
softwareVersion_ = s;
}
return s;
}
}
/**
* required string softwareVersion = 1;
*/
public com.google.protobuf.ByteString
getSoftwareVersionBytes() {
java.lang.Object ref = softwareVersion_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
softwareVersion_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required string configVersion = 2;
public static final int CONFIGVERSION_FIELD_NUMBER = 2;
private java.lang.Object configVersion_;
/**
* required string configVersion = 2;
*/
public boolean hasConfigVersion() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string configVersion = 2;
*/
public java.lang.String getConfigVersion() {
java.lang.Object ref = configVersion_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
configVersion_ = s;
}
return s;
}
}
/**
* required string configVersion = 2;
*/
public com.google.protobuf.ByteString
getConfigVersionBytes() {
java.lang.Object ref = configVersion_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
configVersion_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required uint64 uptime = 3;
public static final int UPTIME_FIELD_NUMBER = 3;
private long uptime_;
/**
* required uint64 uptime = 3;
*/
public boolean hasUptime() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 uptime = 3;
*/
public long getUptime() {
return uptime_;
}
private void initFields() {
softwareVersion_ = "";
configVersion_ = "";
uptime_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasSoftwareVersion()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasConfigVersion()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasUptime()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getSoftwareVersionBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getConfigVersionBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt64(3, uptime_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getSoftwareVersionBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getConfigVersionBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, uptime_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto) obj;
boolean result = true;
result = result && (hasSoftwareVersion() == other.hasSoftwareVersion());
if (hasSoftwareVersion()) {
result = result && getSoftwareVersion()
.equals(other.getSoftwareVersion());
}
result = result && (hasConfigVersion() == other.hasConfigVersion());
if (hasConfigVersion()) {
result = result && getConfigVersion()
.equals(other.getConfigVersion());
}
result = result && (hasUptime() == other.hasUptime());
if (hasUptime()) {
result = result && (getUptime()
== other.getUptime());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasSoftwareVersion()) {
hash = (37 * hash) + SOFTWAREVERSION_FIELD_NUMBER;
hash = (53 * hash) + getSoftwareVersion().hashCode();
}
if (hasConfigVersion()) {
hash = (37 * hash) + CONFIGVERSION_FIELD_NUMBER;
hash = (53 * hash) + getConfigVersion().hashCode();
}
if (hasUptime()) {
hash = (37 * hash) + UPTIME_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getUptime());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.DatanodeLocalInfoProto}
*
*
**
* Datanode local information
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
softwareVersion_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
configVersion_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
uptime_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.softwareVersion_ = softwareVersion_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.configVersion_ = configVersion_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.uptime_ = uptime_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.getDefaultInstance()) return this;
if (other.hasSoftwareVersion()) {
bitField0_ |= 0x00000001;
softwareVersion_ = other.softwareVersion_;
onChanged();
}
if (other.hasConfigVersion()) {
bitField0_ |= 0x00000002;
configVersion_ = other.configVersion_;
onChanged();
}
if (other.hasUptime()) {
setUptime(other.getUptime());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasSoftwareVersion()) {
return false;
}
if (!hasConfigVersion()) {
return false;
}
if (!hasUptime()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string softwareVersion = 1;
private java.lang.Object softwareVersion_ = "";
/**
* required string softwareVersion = 1;
*/
public boolean hasSoftwareVersion() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string softwareVersion = 1;
*/
public java.lang.String getSoftwareVersion() {
java.lang.Object ref = softwareVersion_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
softwareVersion_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string softwareVersion = 1;
*/
public com.google.protobuf.ByteString
getSoftwareVersionBytes() {
java.lang.Object ref = softwareVersion_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
softwareVersion_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string softwareVersion = 1;
*/
public Builder setSoftwareVersion(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
softwareVersion_ = value;
onChanged();
return this;
}
/**
* required string softwareVersion = 1;
*/
public Builder clearSoftwareVersion() {
bitField0_ = (bitField0_ & ~0x00000001);
softwareVersion_ = getDefaultInstance().getSoftwareVersion();
onChanged();
return this;
}
/**
* required string softwareVersion = 1;
*/
public Builder setSoftwareVersionBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
softwareVersion_ = value;
onChanged();
return this;
}
// required string configVersion = 2;
private java.lang.Object configVersion_ = "";
/**
* required string configVersion = 2;
*/
public boolean hasConfigVersion() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string configVersion = 2;
*/
public java.lang.String getConfigVersion() {
java.lang.Object ref = configVersion_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
configVersion_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string configVersion = 2;
*/
public com.google.protobuf.ByteString
getConfigVersionBytes() {
java.lang.Object ref = configVersion_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
configVersion_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string configVersion = 2;
*/
public Builder setConfigVersion(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
configVersion_ = value;
onChanged();
return this;
}
/**
* required string configVersion = 2;
*/
public Builder clearConfigVersion() {
bitField0_ = (bitField0_ & ~0x00000002);
configVersion_ = getDefaultInstance().getConfigVersion();
onChanged();
return this;
}
/**
* required string configVersion = 2;
*/
public Builder setConfigVersionBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
configVersion_ = value;
onChanged();
return this;
}
// required uint64 uptime = 3;
private long uptime_ ;
/**
* required uint64 uptime = 3;
*/
public boolean hasUptime() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 uptime = 3;
*/
public long getUptime() {
return uptime_;
}
/**
* required uint64 uptime = 3;
*/
public Builder setUptime(long value) {
bitField0_ |= 0x00000004;
uptime_ = value;
onChanged();
return this;
}
/**
* required uint64 uptime = 3;
*/
public Builder clearUptime() {
bitField0_ = (bitField0_ & ~0x00000004);
uptime_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeLocalInfoProto)
}
static {
defaultInstance = new DatanodeLocalInfoProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeLocalInfoProto)
}
/**
 * Accessor interface for {@code hadoop.hdfs.DatanodeVolumeInfoProto}
 * (per-volume datanode storage statistics); implemented by both the
 * immutable message and its builder.  Generated by protoc — do not edit.
 */
public interface DatanodeVolumeInfoProtoOrBuilder
    extends com.google.protobuf.MessageOrBuilder {

  // required string path = 1;
  /**
   * <code>required string path = 1;</code>
   */
  boolean hasPath();
  /**
   * <code>required string path = 1;</code>
   */
  java.lang.String getPath();
  /**
   * <code>required string path = 1;</code>
   */
  com.google.protobuf.ByteString
      getPathBytes();

  // required .hadoop.hdfs.StorageTypeProto storageType = 2;
  /**
   * <code>required .hadoop.hdfs.StorageTypeProto storageType = 2;</code>
   */
  boolean hasStorageType();
  /**
   * <code>required .hadoop.hdfs.StorageTypeProto storageType = 2;</code>
   */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType();

  // required uint64 usedSpace = 3;
  /**
   * <code>required uint64 usedSpace = 3;</code>
   */
  boolean hasUsedSpace();
  /**
   * <code>required uint64 usedSpace = 3;</code>
   */
  long getUsedSpace();

  // required uint64 freeSpace = 4;
  /**
   * <code>required uint64 freeSpace = 4;</code>
   */
  boolean hasFreeSpace();
  /**
   * <code>required uint64 freeSpace = 4;</code>
   */
  long getFreeSpace();

  // required uint64 reservedSpace = 5;
  /**
   * <code>required uint64 reservedSpace = 5;</code>
   */
  boolean hasReservedSpace();
  /**
   * <code>required uint64 reservedSpace = 5;</code>
   */
  long getReservedSpace();

  // required uint64 reservedSpaceForReplicas = 6;
  /**
   * <code>required uint64 reservedSpaceForReplicas = 6;</code>
   */
  boolean hasReservedSpaceForReplicas();
  /**
   * <code>required uint64 reservedSpaceForReplicas = 6;</code>
   */
  long getReservedSpaceForReplicas();

  // required uint64 numBlocks = 7;
  /**
   * <code>required uint64 numBlocks = 7;</code>
   */
  boolean hasNumBlocks();
  /**
   * <code>required uint64 numBlocks = 7;</code>
   */
  long getNumBlocks();
}
/**
* Protobuf type {@code hadoop.hdfs.DatanodeVolumeInfoProto}
*
*
**
* Datanode volume information
*
*/
public static final class DatanodeVolumeInfoProto extends
com.google.protobuf.GeneratedMessage
implements DatanodeVolumeInfoProtoOrBuilder {
// Use DatanodeVolumeInfoProto.newBuilder() to construct.
private DatanodeVolumeInfoProto(com.google.protobuf.GeneratedMessage.Builder> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private DatanodeVolumeInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final DatanodeVolumeInfoProto defaultInstance;
public static DatanodeVolumeInfoProto getDefaultInstance() {
return defaultInstance;
}
public DatanodeVolumeInfoProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DatanodeVolumeInfoProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
path_ = input.readBytes();
break;
}
case 16: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(2, rawValue);
} else {
bitField0_ |= 0x00000002;
storageType_ = value;
}
break;
}
case 24: {
bitField0_ |= 0x00000004;
usedSpace_ = input.readUInt64();
break;
}
case 32: {
bitField0_ |= 0x00000008;
freeSpace_ = input.readUInt64();
break;
}
case 40: {
bitField0_ |= 0x00000010;
reservedSpace_ = input.readUInt64();
break;
}
case 48: {
bitField0_ |= 0x00000020;
reservedSpaceForReplicas_ = input.readUInt64();
break;
}
case 56: {
bitField0_ |= 0x00000040;
numBlocks_ = input.readUInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.Builder.class);
}
public static com.google.protobuf.Parser PARSER =
new com.google.protobuf.AbstractParser() {
public DatanodeVolumeInfoProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new DatanodeVolumeInfoProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser getParserForType() {
return PARSER;
}
private int bitField0_;
// required string path = 1;
public static final int PATH_FIELD_NUMBER = 1;
private java.lang.Object path_;
/**
* required string path = 1;
*/
public boolean hasPath() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string path = 1;
*/
public java.lang.String getPath() {
java.lang.Object ref = path_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
path_ = s;
}
return s;
}
}
/**
* required string path = 1;
*/
public com.google.protobuf.ByteString
getPathBytes() {
java.lang.Object ref = path_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
path_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required .hadoop.hdfs.StorageTypeProto storageType = 2;
public static final int STORAGETYPE_FIELD_NUMBER = 2;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto storageType_;
/**
* required .hadoop.hdfs.StorageTypeProto storageType = 2;
*/
public boolean hasStorageType() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required .hadoop.hdfs.StorageTypeProto storageType = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
return storageType_;
}
// required uint64 usedSpace = 3;
public static final int USEDSPACE_FIELD_NUMBER = 3;
private long usedSpace_;
/**
* required uint64 usedSpace = 3;
*/
public boolean hasUsedSpace() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 usedSpace = 3;
*/
public long getUsedSpace() {
return usedSpace_;
}
// required uint64 freeSpace = 4;
public static final int FREESPACE_FIELD_NUMBER = 4;
private long freeSpace_;
/**
* required uint64 freeSpace = 4;
*/
public boolean hasFreeSpace() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required uint64 freeSpace = 4;
*/
public long getFreeSpace() {
return freeSpace_;
}
// required uint64 reservedSpace = 5;
public static final int RESERVEDSPACE_FIELD_NUMBER = 5;
private long reservedSpace_;
/**
* required uint64 reservedSpace = 5;
*/
public boolean hasReservedSpace() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required uint64 reservedSpace = 5;
*/
public long getReservedSpace() {
return reservedSpace_;
}
// required uint64 reservedSpaceForReplicas = 6;
public static final int RESERVEDSPACEFORREPLICAS_FIELD_NUMBER = 6;
private long reservedSpaceForReplicas_;
/**
* required uint64 reservedSpaceForReplicas = 6;
*/
public boolean hasReservedSpaceForReplicas() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* required uint64 reservedSpaceForReplicas = 6;
*/
public long getReservedSpaceForReplicas() {
return reservedSpaceForReplicas_;
}
// required uint64 numBlocks = 7;
public static final int NUMBLOCKS_FIELD_NUMBER = 7;
private long numBlocks_;
/**
* required uint64 numBlocks = 7;
*/
public boolean hasNumBlocks() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* required uint64 numBlocks = 7;
*/
public long getNumBlocks() {
return numBlocks_;
}
private void initFields() {
path_ = "";
storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
usedSpace_ = 0L;
freeSpace_ = 0L;
reservedSpace_ = 0L;
reservedSpaceForReplicas_ = 0L;
numBlocks_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasPath()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasStorageType()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasUsedSpace()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasFreeSpace()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasReservedSpace()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasReservedSpaceForReplicas()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasNumBlocks()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getPathBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeEnum(2, storageType_.getNumber());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt64(3, usedSpace_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt64(4, freeSpace_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeUInt64(5, reservedSpace_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeUInt64(6, reservedSpaceForReplicas_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeUInt64(7, numBlocks_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getPathBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(2, storageType_.getNumber());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, usedSpace_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(4, freeSpace_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(5, reservedSpace_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(6, reservedSpaceForReplicas_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(7, numBlocks_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
// Structural equality: two messages are equal when each field has the same
// presence bit and, where present, the same value; unknown fields must match.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto) obj;
boolean result = true;
result = result && (hasPath() == other.hasPath());
if (hasPath()) {
result = result && getPath()
.equals(other.getPath());
}
result = result && (hasStorageType() == other.hasStorageType());
if (hasStorageType()) {
result = result &&
(getStorageType() == other.getStorageType());
}
result = result && (hasUsedSpace() == other.hasUsedSpace());
if (hasUsedSpace()) {
result = result && (getUsedSpace()
== other.getUsedSpace());
}
result = result && (hasFreeSpace() == other.hasFreeSpace());
if (hasFreeSpace()) {
result = result && (getFreeSpace()
== other.getFreeSpace());
}
result = result && (hasReservedSpace() == other.hasReservedSpace());
if (hasReservedSpace()) {
result = result && (getReservedSpace()
== other.getReservedSpace());
}
result = result && (hasReservedSpaceForReplicas() == other.hasReservedSpaceForReplicas());
if (hasReservedSpaceForReplicas()) {
result = result && (getReservedSpaceForReplicas()
== other.getReservedSpaceForReplicas());
}
result = result && (hasNumBlocks() == other.hasNumBlocks());
if (hasNumBlocks()) {
result = result && (getNumBlocks()
== other.getNumBlocks());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
// Cached hash; 0 means "not yet computed" (a computed hash of exactly 0
// would be recomputed each call — benign, standard protobuf codegen trade-off).
private int memoizedHashCode = 0;
// Hash mixes each present field's number and value, consistent with equals().
// hashLong/hashEnum are hash helpers inherited from the protobuf runtime.
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasPath()) {
hash = (37 * hash) + PATH_FIELD_NUMBER;
hash = (53 * hash) + getPath().hashCode();
}
if (hasStorageType()) {
hash = (37 * hash) + STORAGETYPE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getStorageType());
}
if (hasUsedSpace()) {
hash = (37 * hash) + USEDSPACE_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getUsedSpace());
}
if (hasFreeSpace()) {
hash = (37 * hash) + FREESPACE_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getFreeSpace());
}
if (hasReservedSpace()) {
hash = (37 * hash) + RESERVEDSPACE_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getReservedSpace());
}
if (hasReservedSpaceForReplicas()) {
hash = (37 * hash) + RESERVEDSPACEFORREPLICAS_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getReservedSpaceForReplicas());
}
if (hasNumBlocks()) {
hash = (37 * hash) + NUMBLOCKS_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getNumBlocks());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// --- Static parse entry points -------------------------------------------
// All overloads delegate to PARSER; the ExtensionRegistryLite variants allow
// extension fields to be resolved during parsing. parseDelimitedFrom reads a
// varint length prefix first (for streams of multiple messages).
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// --- Builder factories ---------------------------------------------------
// newBuilder(prototype) starts from a copy of an existing message;
// toBuilder() is the instance-method form of the same operation.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
// Framework hook: builder attached to a parent for change notification.
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Protobuf type {@code hadoop.hdfs.DatanodeVolumeInfoProto}
 *
 *
 **
 * Datanode volume information
 *
 */
public static final class Builder extends
// NOTE(review): the type argument here (normally <Builder>) appears to have
// been stripped when this generated file was extracted from HTML — confirm
// against the original protoc output.
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
// This message has no sub-message fields, so nothing to pre-initialize.
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
// Resets every field to its default and clears all presence bits.
public Builder clear() {
super.clear();
path_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
bitField0_ = (bitField0_ & ~0x00000002);
usedSpace_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
freeSpace_ = 0L;
bitField0_ = (bitField0_ & ~0x00000008);
reservedSpace_ = 0L;
bitField0_ = (bitField0_ & ~0x00000010);
reservedSpaceForReplicas_ = 0L;
bitField0_ = (bitField0_ & ~0x00000020);
numBlocks_ = 0L;
bitField0_ = (bitField0_ & ~0x00000040);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.getDefaultInstance();
}
// build() enforces that all required fields are set; buildPartial() does not.
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
// Copies builder state into a new immutable message; to_bitField0_
// accumulates the presence bits that carry over to the message.
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.path_ = path_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.storageType_ = storageType_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.usedSpace_ = usedSpace_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.freeSpace_ = freeSpace_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.reservedSpace_ = reservedSpace_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000020;
}
result.reservedSpaceForReplicas_ = reservedSpaceForReplicas_;
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000040;
}
result.numBlocks_ = numBlocks_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
// Field-by-field merge: only fields set in `other` overwrite this builder.
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.getDefaultInstance()) return this;
if (other.hasPath()) {
bitField0_ |= 0x00000001;
path_ = other.path_;
onChanged();
}
if (other.hasStorageType()) {
setStorageType(other.getStorageType());
}
if (other.hasUsedSpace()) {
setUsedSpace(other.getUsedSpace());
}
if (other.hasFreeSpace()) {
setFreeSpace(other.getFreeSpace());
}
if (other.hasReservedSpace()) {
setReservedSpace(other.getReservedSpace());
}
if (other.hasReservedSpaceForReplicas()) {
setReservedSpaceForReplicas(other.getReservedSpaceForReplicas());
}
if (other.hasNumBlocks()) {
setNumBlocks(other.getNumBlocks());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
// All seven proto fields are declared `required`, so each must be present.
public final boolean isInitialized() {
if (!hasPath()) {
return false;
}
if (!hasStorageType()) {
return false;
}
if (!hasUsedSpace()) {
return false;
}
if (!hasFreeSpace()) {
return false;
}
if (!hasReservedSpace()) {
return false;
}
if (!hasReservedSpaceForReplicas()) {
return false;
}
if (!hasNumBlocks()) {
return false;
}
return true;
}
// Parses from a stream and merges the result into this builder. The finally
// block merges whatever was parsed even when an exception is rethrown, so a
// partially-read message is not lost.
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// Presence bits: bit N set <=> field N+1 has been assigned in this builder.
private int bitField0_;
// required string path = 1;
// Stored as Object: either a String or a ByteString, converted lazily below.
private java.lang.Object path_ = "";
/**
 * required string path = 1;
 */
public boolean hasPath() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
 * required string path = 1;
 */
public java.lang.String getPath() {
java.lang.Object ref = path_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
path_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
 * required string path = 1;
 */
public com.google.protobuf.ByteString
getPathBytes() {
java.lang.Object ref = path_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
path_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
 * required string path = 1;
 */
public Builder setPath(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
path_ = value;
onChanged();
return this;
}
/**
 * required string path = 1;
 */
public Builder clearPath() {
bitField0_ = (bitField0_ & ~0x00000001);
path_ = getDefaultInstance().getPath();
onChanged();
return this;
}
/**
 * required string path = 1;
 */
public Builder setPathBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
path_ = value;
onChanged();
return this;
}
// required .hadoop.hdfs.StorageTypeProto storageType = 2;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
/**
 * required .hadoop.hdfs.StorageTypeProto storageType = 2;
 */
public boolean hasStorageType() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
 * required .hadoop.hdfs.StorageTypeProto storageType = 2;
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
return storageType_;
}
/**
 * required .hadoop.hdfs.StorageTypeProto storageType = 2;
 */
public Builder setStorageType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
storageType_ = value;
onChanged();
return this;
}
/**
 * required .hadoop.hdfs.StorageTypeProto storageType = 2;
 */
public Builder clearStorageType() {
bitField0_ = (bitField0_ & ~0x00000002);
storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
onChanged();
return this;
}
// required uint64 usedSpace = 3;
private long usedSpace_ ;
/**
 * required uint64 usedSpace = 3;
 */
public boolean hasUsedSpace() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
 * required uint64 usedSpace = 3;
 */
public long getUsedSpace() {
return usedSpace_;
}
/**
 * required uint64 usedSpace = 3;
 */
public Builder setUsedSpace(long value) {
bitField0_ |= 0x00000004;
usedSpace_ = value;
onChanged();
return this;
}
/**
 * required uint64 usedSpace = 3;
 */
public Builder clearUsedSpace() {
bitField0_ = (bitField0_ & ~0x00000004);
usedSpace_ = 0L;
onChanged();
return this;
}
// required uint64 freeSpace = 4;
private long freeSpace_ ;
/**
 * required uint64 freeSpace = 4;
 */
public boolean hasFreeSpace() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
 * required uint64 freeSpace = 4;
 */
public long getFreeSpace() {
return freeSpace_;
}
/**
 * required uint64 freeSpace = 4;
 */
public Builder setFreeSpace(long value) {
bitField0_ |= 0x00000008;
freeSpace_ = value;
onChanged();
return this;
}
/**
 * required uint64 freeSpace = 4;
 */
public Builder clearFreeSpace() {
bitField0_ = (bitField0_ & ~0x00000008);
freeSpace_ = 0L;
onChanged();
return this;
}
// required uint64 reservedSpace = 5;
private long reservedSpace_ ;
/**
 * required uint64 reservedSpace = 5;
 */
public boolean hasReservedSpace() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
 * required uint64 reservedSpace = 5;
 */
public long getReservedSpace() {
return reservedSpace_;
}
/**
 * required uint64 reservedSpace = 5;
 */
public Builder setReservedSpace(long value) {
bitField0_ |= 0x00000010;
reservedSpace_ = value;
onChanged();
return this;
}
/**
 * required uint64 reservedSpace = 5;
 */
public Builder clearReservedSpace() {
bitField0_ = (bitField0_ & ~0x00000010);
reservedSpace_ = 0L;
onChanged();
return this;
}
// required uint64 reservedSpaceForReplicas = 6;
private long reservedSpaceForReplicas_ ;
/**
 * required uint64 reservedSpaceForReplicas = 6;
 */
public boolean hasReservedSpaceForReplicas() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
 * required uint64 reservedSpaceForReplicas = 6;
 */
public long getReservedSpaceForReplicas() {
return reservedSpaceForReplicas_;
}
/**
 * required uint64 reservedSpaceForReplicas = 6;
 */
public Builder setReservedSpaceForReplicas(long value) {
bitField0_ |= 0x00000020;
reservedSpaceForReplicas_ = value;
onChanged();
return this;
}
/**
 * required uint64 reservedSpaceForReplicas = 6;
 */
public Builder clearReservedSpaceForReplicas() {
bitField0_ = (bitField0_ & ~0x00000020);
reservedSpaceForReplicas_ = 0L;
onChanged();
return this;
}
// required uint64 numBlocks = 7;
private long numBlocks_ ;
/**
 * required uint64 numBlocks = 7;
 */
public boolean hasNumBlocks() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
 * required uint64 numBlocks = 7;
 */
public long getNumBlocks() {
return numBlocks_;
}
/**
 * required uint64 numBlocks = 7;
 */
public Builder setNumBlocks(long value) {
bitField0_ |= 0x00000040;
numBlocks_ = value;
onChanged();
return this;
}
/**
 * required uint64 numBlocks = 7;
 */
public Builder clearNumBlocks() {
bitField0_ = (bitField0_ & ~0x00000040);
numBlocks_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeVolumeInfoProto)
}
// Creates the shared default (all-defaults) instance once at class load.
static {
defaultInstance = new DatanodeVolumeInfoProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeVolumeInfoProto)
}
public interface DatanodeInfosProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
java.util.List
getDatanodesList();
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDatanodes(int index);
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
int getDatanodesCount();
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
java.util.List extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getDatanodesOrBuilderList();
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDatanodesOrBuilder(
int index);
}
/**
* Protobuf type {@code hadoop.hdfs.DatanodeInfosProto}
*
*
**
* DatanodeInfo array
*
*/
public static final class DatanodeInfosProto extends
com.google.protobuf.GeneratedMessage
implements DatanodeInfosProtoOrBuilder {
// Use DatanodeInfosProto.newBuilder() to construct.
// NOTE(review): the parameter type below (normally GeneratedMessage.Builder<?>)
// appears to have lost its wildcard during HTML extraction — confirm against
// the original protoc output.
private DatanodeInfosProto(com.google.protobuf.GeneratedMessage.Builder> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
// noInit constructor: used only for the shared default instance.
private DatanodeInfosProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final DatanodeInfosProto defaultInstance;
public static DatanodeInfosProto getDefaultInstance() {
return defaultInstance;
}
public DatanodeInfosProto getDefaultInstanceForType() {
return defaultInstance;
}
// Fields not recognized by this schema version, preserved for round-tripping.
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
// Parsing constructor: reads tag/value pairs until end-of-stream (tag 0).
// Note the `default` arm precedes `case 10` — legal Java; every arm breaks,
// so arm order does not affect behavior. Tag 10 = field 1, wire type 2
// (length-delimited), i.e. one repeated `datanodes` element.
private DatanodeInfosProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
// Lazily allocate the list on the first element; the bit in
// mutable_bitField0_ records that the list is now mutable.
// NOTE(review): `new java.util.ArrayList()` is raw here — the type
// argument was likely stripped by the HTML extraction.
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
datanodes_ = new java.util.ArrayList();
mutable_bitField0_ |= 0x00000001;
}
datanodes_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.PARSER, extensionRegistry));
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
// Always freeze the list and the unknown-field set, even on error, so the
// partially-parsed message attached to the exception is immutable.
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
datanodes_ = java.util.Collections.unmodifiableList(datanodes_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder.class);
}
/**
 * Parser used by the protobuf framework and by the static parseFrom()
 * helpers; delegates to the parsing constructor above.
 *
 * NOTE(review): the {@code <DatanodeInfosProto>} type arguments were stripped
 * by the HTML extraction (the source showed raw {@code Parser} /
 * {@code AbstractParser}); restored here to the standard protobuf 2.5
 * codegen form.
 */
public static com.google.protobuf.Parser<DatanodeInfosProto> PARSER =
    new com.google.protobuf.AbstractParser<DatanodeInfosProto>() {
      public DatanodeInfosProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new DatanodeInfosProto(input, extensionRegistry);
      }
    };

/** Framework hook returning the parser for this message type. */
@java.lang.Override
public com.google.protobuf.Parser<DatanodeInfosProto> getParserForType() {
  return PARSER;
}
// repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
public static final int DATANODES_FIELD_NUMBER = 1;
private java.util.List datanodes_;
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public java.util.List getDatanodesList() {
return datanodes_;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public java.util.List extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getDatanodesOrBuilderList() {
return datanodes_;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public int getDatanodesCount() {
return datanodes_.size();
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDatanodes(int index) {
return datanodes_.get(index);
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDatanodesOrBuilder(
int index) {
return datanodes_.get(index);
}
private void initFields() {
datanodes_ = java.util.Collections.emptyList();
}
// Cached initialization check: -1 unknown, 0 false, 1 true.
private byte memoizedIsInitialized = -1;
// This message is initialized iff every repeated element is initialized
// (the `datanodes` field itself is repeated, hence never "missing").
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
for (int i = 0; i < getDatanodesCount(); i++) {
if (!getDatanodes(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
// Serializes each element under field number 1, then any unknown fields.
// getSerializedSize() is called first to populate memoized sizes used by
// nested-message serialization.
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < datanodes_.size(); i++) {
output.writeMessage(1, datanodes_.get(i));
}
getUnknownFields().writeTo(output);
}
// Cached wire size; -1 means "not yet computed".
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
for (int i = 0; i < datanodes_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, datanodes_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
// Java serialization hook: delegate to GeneratedMessage's serialized proxy.
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
// Structural equality: element-wise comparison of the repeated field plus
// the preserved unknown fields.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto) obj;
boolean result = true;
result = result && getDatanodesList()
.equals(other.getDatanodesList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
// Cached hash; 0 means "not yet computed".
private int memoizedHashCode = 0;
// Hash consistent with equals(): mixes field number and list hash only when
// the repeated field is non-empty.
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (getDatanodesCount() > 0) {
hash = (37 * hash) + DATANODES_FIELD_NUMBER;
hash = (53 * hash) + getDatanodesList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// --- Static parse entry points -------------------------------------------
// All overloads delegate to PARSER; the ExtensionRegistryLite variants allow
// extension fields to be resolved during parsing. parseDelimitedFrom reads a
// varint length prefix first (for streams of multiple messages).
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// --- Builder factories ---------------------------------------------------
// newBuilder(prototype) starts from a copy of an existing message;
// toBuilder() is the instance-method form of the same operation.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
// Framework hook: builder attached to a parent for change notification.
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.DatanodeInfosProto}
*
*
**
* DatanodeInfo array
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getDatanodesFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
// Resets the repeated field. Two representations exist: a plain list
// (datanodesBuilder_ == null) or a RepeatedFieldBuilder; clear whichever
// is active.
public Builder clear() {
super.clear();
if (datanodesBuilder_ == null) {
datanodes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
} else {
datanodesBuilder_.clear();
}
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance();
}
// build() enforces that all elements are fully initialized; buildPartial()
// does not.
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
// Freezes the plain list (making it unmodifiable and clearing the mutable
// bit so future edits re-copy it), or builds from the field builder.
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto(this);
int from_bitField0_ = bitField0_;
if (datanodesBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001)) {
datanodes_ = java.util.Collections.unmodifiableList(datanodes_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.datanodes_ = datanodes_;
} else {
result.datanodes_ = datanodesBuilder_.build();
}
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance()) return this;
if (datanodesBuilder_ == null) {
if (!other.datanodes_.isEmpty()) {
if (datanodes_.isEmpty()) {
datanodes_ = other.datanodes_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureDatanodesIsMutable();
datanodes_.addAll(other.datanodes_);
}
onChanged();
}
} else {
if (!other.datanodes_.isEmpty()) {
if (datanodesBuilder_.isEmpty()) {
datanodesBuilder_.dispose();
datanodesBuilder_ = null;
datanodes_ = other.datanodes_;
bitField0_ = (bitField0_ & ~0x00000001);
datanodesBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getDatanodesFieldBuilder() : null;
} else {
datanodesBuilder_.addAllMessages(other.datanodes_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
  // Initialized iff every repeated datanode element is itself initialized.
  final int count = getDatanodesCount();
  for (int idx = 0; idx < count; ++idx) {
    if (!getDatanodes(idx).isInitialized()) {
      return false;
    }
  }
  return true;
}
public Builder mergeFrom(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  // Parse a message from the stream and merge it into this builder.  On a
  // parse failure the partially-parsed message is still merged (in the
  // finally block) before the exception propagates, so fields read before
  // the error are not lost.
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parsedMessage = null;
  try {
    parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
    parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto) e.getUnfinishedMessage();
    throw e;
  } finally {
    if (parsedMessage != null) {
      mergeFrom(parsedMessage);
    }
  }
  return this;
}
private int bitField0_;
// repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
// NOTE(review): the generic type arguments below were stripped when this
// generated file was copied (raw List/ArrayList); restored to match protoc's
// actual output for a repeated message field.
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> datanodes_ =
  java.util.Collections.emptyList();
// Copy-on-write guard: the list may be shared with a built message or another
// builder; clone it before the first local mutation.
private void ensureDatanodesIsMutable() {
  if (!((bitField0_ & 0x00000001) == 0x00000001)) {
    datanodes_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>(datanodes_);
    bitField0_ |= 0x00000001;
  }
}
// Lazily-created nested field builder; non-null once any *Builder accessor is used.
private com.google.protobuf.RepeatedFieldBuilder<
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> datanodesBuilder_;
/**
 * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
 *
 * Returns an unmodifiable view of the current datanodes.
 * NOTE(review): generic return type restored (was stripped to a raw List).
 */
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getDatanodesList() {
  if (datanodesBuilder_ == null) {
    return java.util.Collections.unmodifiableList(datanodes_);
  } else {
    return datanodesBuilder_.getMessageList();
  }
}
/**
 * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
 *
 * Returns the number of datanode entries currently held by this builder.
 */
public int getDatanodesCount() {
  return datanodesBuilder_ == null
      ? datanodes_.size()
      : datanodesBuilder_.getCount();
}
/**
 * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
 *
 * Returns the datanode entry at {@code index}.
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDatanodes(int index) {
  return datanodesBuilder_ == null
      ? datanodes_.get(index)
      : datanodesBuilder_.getMessage(index);
}
/**
 * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
 *
 * Replaces the element at {@code index}; rejects null values.
 */
public Builder setDatanodes(
    int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
  if (datanodesBuilder_ == null) {
    if (value == null) {
      throw new NullPointerException();
    }
    ensureDatanodesIsMutable();
    datanodes_.set(index, value);
    onChanged();
  } else {
    datanodesBuilder_.setMessage(index, value);
  }
  return this;
}
/**
 * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
 *
 * Replaces the element at {@code index} with the built form of the given builder.
 */
public Builder setDatanodes(
    int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
  if (datanodesBuilder_ == null) {
    ensureDatanodesIsMutable();
    datanodes_.set(index, builderForValue.build());
    onChanged();
  } else {
    datanodesBuilder_.setMessage(index, builderForValue.build());
  }
  return this;
}
/**
 * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
 *
 * Appends {@code value}; rejects null.
 */
public Builder addDatanodes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
  if (datanodesBuilder_ == null) {
    if (value == null) {
      throw new NullPointerException();
    }
    ensureDatanodesIsMutable();
    datanodes_.add(value);
    onChanged();
  } else {
    datanodesBuilder_.addMessage(value);
  }
  return this;
}
/**
 * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
 *
 * Inserts {@code value} at {@code index}; rejects null.
 */
public Builder addDatanodes(
    int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
  if (datanodesBuilder_ == null) {
    if (value == null) {
      throw new NullPointerException();
    }
    ensureDatanodesIsMutable();
    datanodes_.add(index, value);
    onChanged();
  } else {
    datanodesBuilder_.addMessage(index, value);
  }
  return this;
}
/**
 * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
 *
 * Appends the built form of the given builder.
 */
public Builder addDatanodes(
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
  if (datanodesBuilder_ == null) {
    ensureDatanodesIsMutable();
    datanodes_.add(builderForValue.build());
    onChanged();
  } else {
    datanodesBuilder_.addMessage(builderForValue.build());
  }
  return this;
}
/**
 * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
 *
 * Inserts the built form of the given builder at {@code index}.
 */
public Builder addDatanodes(
    int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
  if (datanodesBuilder_ == null) {
    ensureDatanodesIsMutable();
    datanodes_.add(index, builderForValue.build());
    onChanged();
  } else {
    datanodesBuilder_.addMessage(index, builderForValue.build());
  }
  return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public Builder addAllDatanodes(
java.lang.Iterable extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> values) {
if (datanodesBuilder_ == null) {
ensureDatanodesIsMutable();
super.addAll(values, datanodes_);
onChanged();
} else {
datanodesBuilder_.addAllMessages(values);
}
return this;
}
/**
 * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
 *
 * Removes all elements from the repeated field.
 */
public Builder clearDatanodes() {
  if (datanodesBuilder_ == null) {
    // Drop back to the shared immutable empty list and clear the mutability bit.
    datanodes_ = java.util.Collections.emptyList();
    bitField0_ = (bitField0_ & ~0x00000001);
    onChanged();
  } else {
    datanodesBuilder_.clear();
  }
  return this;
}
/**
 * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
 *
 * Removes the element at {@code index}.
 */
public Builder removeDatanodes(int index) {
  if (datanodesBuilder_ == null) {
    ensureDatanodesIsMutable();
    datanodes_.remove(index);
    onChanged();
  } else {
    datanodesBuilder_.remove(index);
  }
  return this;
}
/**
 * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
 *
 * Returns a mutable builder for the element at {@code index}; forces creation
 * of the nested field builder.
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getDatanodesBuilder(
    int index) {
  return getDatanodesFieldBuilder().getBuilder(index);
}
/**
 * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
 *
 * Returns the element at {@code index} as a message-or-builder view,
 * without forcing creation of the nested field builder.
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDatanodesOrBuilder(
    int index) {
  if (datanodesBuilder_ != null) {
    return datanodesBuilder_.getMessageOrBuilder(index);
  }
  return datanodes_.get(index);
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
*/
public java.util.List extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getDatanodesOrBuilderList() {
if (datanodesBuilder_ != null) {
return datanodesBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(datanodes_);
}
}
/**
 * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
 *
 * Appends a new default-valued element and returns its builder.
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addDatanodesBuilder() {
  return getDatanodesFieldBuilder().addBuilder(
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
}
/**
 * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
 *
 * Inserts a new default-valued element at {@code index} and returns its builder.
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addDatanodesBuilder(
    int index) {
  return getDatanodesFieldBuilder().addBuilder(
      index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
}
/**
 * repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;
 *
 * Returns builders for every element; forces creation of the nested field builder.
 * NOTE(review): generic return type restored (was stripped to a raw List).
 */
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder>
     getDatanodesBuilderList() {
  return getDatanodesFieldBuilder().getBuilderList();
}
// Lazily creates the RepeatedFieldBuilder, transferring ownership of the
// inline list to it (datanodes_ is nulled out afterwards so the two
// representations are never live at once).
private com.google.protobuf.RepeatedFieldBuilder<
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
    getDatanodesFieldBuilder() {
  if (datanodesBuilder_ == null) {
    datanodesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>(
            datanodes_,
            ((bitField0_ & 0x00000001) == 0x00000001),
            getParentForChildren(),
            isClean());
    datanodes_ = null;
  }
  return datanodesBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeInfosProto)
}
static {
  // Eagerly create the singleton returned by getDefaultInstance().
  defaultInstance = new DatanodeInfosProto(true);
  defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeInfosProto)
}
/**
 * Accessor interface implemented by both {@code DatanodeInfoProto} and its
 * {@code Builder}; exposes presence checks ({@code hasX}) and getters for
 * every field of {@code hadoop.hdfs.DatanodeInfoProto}.
 */
public interface DatanodeInfoProtoOrBuilder
    extends com.google.protobuf.MessageOrBuilder {
  // required .hadoop.hdfs.DatanodeIDProto id = 1;
  /**
   * required .hadoop.hdfs.DatanodeIDProto id = 1;
   */
  boolean hasId();
  /**
   * required .hadoop.hdfs.DatanodeIDProto id = 1;
   */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getId();
  /**
   * required .hadoop.hdfs.DatanodeIDProto id = 1;
   */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getIdOrBuilder();
  // optional uint64 capacity = 2 [default = 0];
  /**
   * optional uint64 capacity = 2 [default = 0];
   */
  boolean hasCapacity();
  /**
   * optional uint64 capacity = 2 [default = 0];
   */
  long getCapacity();
  // optional uint64 dfsUsed = 3 [default = 0];
  /**
   * optional uint64 dfsUsed = 3 [default = 0];
   */
  boolean hasDfsUsed();
  /**
   * optional uint64 dfsUsed = 3 [default = 0];
   */
  long getDfsUsed();
  // optional uint64 remaining = 4 [default = 0];
  /**
   * optional uint64 remaining = 4 [default = 0];
   */
  boolean hasRemaining();
  /**
   * optional uint64 remaining = 4 [default = 0];
   */
  long getRemaining();
  // optional uint64 blockPoolUsed = 5 [default = 0];
  /**
   * optional uint64 blockPoolUsed = 5 [default = 0];
   */
  boolean hasBlockPoolUsed();
  /**
   * optional uint64 blockPoolUsed = 5 [default = 0];
   */
  long getBlockPoolUsed();
  // optional uint64 lastUpdate = 6 [default = 0];
  /**
   * optional uint64 lastUpdate = 6 [default = 0];
   */
  boolean hasLastUpdate();
  /**
   * optional uint64 lastUpdate = 6 [default = 0];
   */
  long getLastUpdate();
  // optional uint32 xceiverCount = 7 [default = 0];
  /**
   * optional uint32 xceiverCount = 7 [default = 0];
   */
  boolean hasXceiverCount();
  /**
   * optional uint32 xceiverCount = 7 [default = 0];
   */
  int getXceiverCount();
  // optional string location = 8;
  /**
   * optional string location = 8;
   */
  boolean hasLocation();
  /**
   * optional string location = 8;
   */
  java.lang.String getLocation();
  /**
   * optional string location = 8;
   */
  com.google.protobuf.ByteString
      getLocationBytes();
  // optional uint64 nonDfsUsed = 9;
  /**
   * optional uint64 nonDfsUsed = 9;
   */
  boolean hasNonDfsUsed();
  /**
   * optional uint64 nonDfsUsed = 9;
   */
  long getNonDfsUsed();
  // optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];
  /**
   * optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];
   */
  boolean hasAdminState();
  /**
   * optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];
   */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState getAdminState();
  // optional uint64 cacheCapacity = 11 [default = 0];
  /**
   * optional uint64 cacheCapacity = 11 [default = 0];
   */
  boolean hasCacheCapacity();
  /**
   * optional uint64 cacheCapacity = 11 [default = 0];
   */
  long getCacheCapacity();
  // optional uint64 cacheUsed = 12 [default = 0];
  /**
   * optional uint64 cacheUsed = 12 [default = 0];
   */
  boolean hasCacheUsed();
  /**
   * optional uint64 cacheUsed = 12 [default = 0];
   */
  long getCacheUsed();
  // optional uint64 lastUpdateMonotonic = 13 [default = 0];
  /**
   * optional uint64 lastUpdateMonotonic = 13 [default = 0];
   */
  boolean hasLastUpdateMonotonic();
  /**
   * optional uint64 lastUpdateMonotonic = 13 [default = 0];
   */
  long getLastUpdateMonotonic();
  // optional string upgradeDomain = 14;
  /**
   * optional string upgradeDomain = 14;
   */
  boolean hasUpgradeDomain();
  /**
   * optional string upgradeDomain = 14;
   */
  java.lang.String getUpgradeDomain();
  /**
   * optional string upgradeDomain = 14;
   */
  com.google.protobuf.ByteString
      getUpgradeDomainBytes();
  // optional uint64 lastBlockReportTime = 15 [default = 0];
  /**
   * optional uint64 lastBlockReportTime = 15 [default = 0];
   */
  boolean hasLastBlockReportTime();
  /**
   * optional uint64 lastBlockReportTime = 15 [default = 0];
   */
  long getLastBlockReportTime();
  // optional uint64 lastBlockReportMonotonic = 16 [default = 0];
  /**
   * optional uint64 lastBlockReportMonotonic = 16 [default = 0];
   */
  boolean hasLastBlockReportMonotonic();
  /**
   * optional uint64 lastBlockReportMonotonic = 16 [default = 0];
   */
  long getLastBlockReportMonotonic();
  // optional uint32 numBlocks = 17 [default = 0];
  /**
   * optional uint32 numBlocks = 17 [default = 0];
   */
  boolean hasNumBlocks();
  /**
   * optional uint32 numBlocks = 17 [default = 0];
   */
  int getNumBlocks();
}
/**
* Protobuf type {@code hadoop.hdfs.DatanodeInfoProto}
*
*
**
* The status of a Datanode
*
*/
public static final class DatanodeInfoProto extends
com.google.protobuf.GeneratedMessage
implements DatanodeInfoProtoOrBuilder {
// Use DatanodeInfoProto.newBuilder() to construct.
// NOTE(review): the parameter type read {@code GeneratedMessage.Builder>} — a
// syntax error from HTML-stripped generics; restored to the wildcard form
// protoc emits.
private DatanodeInfoProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
  super(builder);
  this.unknownFields = builder.getUnknownFields();
}
// Constructs the shared default instance; fields are set later by initFields().
private DatanodeInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
// Singleton default instance, created in the static initializer.
private static final DatanodeInfoProto defaultInstance;
public static DatanodeInfoProto getDefaultInstance() {
  return defaultInstance;
}
public DatanodeInfoProto getDefaultInstanceForType() {
  return defaultInstance;
}
// Fields that were present on the wire but unknown to this schema version.
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
    getUnknownFields() {
  return this.unknownFields;
}
// Wire-parsing constructor: reads tag/value pairs until EOF (tag 0) or an
// unparseable unknown field.  Each case value is (fieldNumber << 3) | wireType.
// Unrecognized tags and out-of-range enum values are preserved in unknownFields.
private DatanodeInfoProto(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  initFields();
  int mutable_bitField0_ = 0;
  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
      com.google.protobuf.UnknownFieldSet.newBuilder();
  try {
    boolean done = false;
    while (!done) {
      int tag = input.readTag();
      switch (tag) {
        case 0:
          // End of stream / end of group.
          done = true;
          break;
        default: {
          if (!parseUnknownField(input, unknownFields,
                                 extensionRegistry, tag)) {
            done = true;
          }
          break;
        }
        case 10: {
          // Field 1: required message id.  If already seen, merge into the
          // previous value rather than replacing it (proto2 semantics).
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder subBuilder = null;
          if (((bitField0_ & 0x00000001) == 0x00000001)) {
            subBuilder = id_.toBuilder();
          }
          id_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.PARSER, extensionRegistry);
          if (subBuilder != null) {
            subBuilder.mergeFrom(id_);
            id_ = subBuilder.buildPartial();
          }
          bitField0_ |= 0x00000001;
          break;
        }
        case 16: {
          bitField0_ |= 0x00000002;
          capacity_ = input.readUInt64();
          break;
        }
        case 24: {
          bitField0_ |= 0x00000004;
          dfsUsed_ = input.readUInt64();
          break;
        }
        case 32: {
          bitField0_ |= 0x00000008;
          remaining_ = input.readUInt64();
          break;
        }
        case 40: {
          bitField0_ |= 0x00000010;
          blockPoolUsed_ = input.readUInt64();
          break;
        }
        case 48: {
          bitField0_ |= 0x00000020;
          lastUpdate_ = input.readUInt64();
          break;
        }
        case 56: {
          bitField0_ |= 0x00000040;
          xceiverCount_ = input.readUInt32();
          break;
        }
        case 66: {
          // Field 8: location, kept as ByteString until first String access.
          bitField0_ |= 0x00000080;
          location_ = input.readBytes();
          break;
        }
        case 72: {
          bitField0_ |= 0x00000100;
          nonDfsUsed_ = input.readUInt64();
          break;
        }
        case 80: {
          // Field 10: enum adminState; unknown numeric values are routed to
          // unknownFields instead of being dropped.
          int rawValue = input.readEnum();
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.valueOf(rawValue);
          if (value == null) {
            unknownFields.mergeVarintField(10, rawValue);
          } else {
            bitField0_ |= 0x00000200;
            adminState_ = value;
          }
          break;
        }
        case 88: {
          bitField0_ |= 0x00000400;
          cacheCapacity_ = input.readUInt64();
          break;
        }
        case 96: {
          bitField0_ |= 0x00000800;
          cacheUsed_ = input.readUInt64();
          break;
        }
        case 104: {
          bitField0_ |= 0x00001000;
          lastUpdateMonotonic_ = input.readUInt64();
          break;
        }
        case 114: {
          bitField0_ |= 0x00002000;
          upgradeDomain_ = input.readBytes();
          break;
        }
        case 120: {
          bitField0_ |= 0x00004000;
          lastBlockReportTime_ = input.readUInt64();
          break;
        }
        case 128: {
          bitField0_ |= 0x00008000;
          lastBlockReportMonotonic_ = input.readUInt64();
          break;
        }
        case 136: {
          bitField0_ |= 0x00010000;
          numBlocks_ = input.readUInt32();
          break;
        }
      }
    }
  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
    throw e.setUnfinishedMessage(this);
  } catch (java.io.IOException e) {
    throw new com.google.protobuf.InvalidProtocolBufferException(
        e.getMessage()).setUnfinishedMessage(this);
  } finally {
    // Always freeze what was parsed, even on error, so getUnfinishedMessage()
    // returns a usable partial message.
    this.unknownFields = unknownFields.build();
    makeExtensionsImmutable();
  }
}
// Descriptor for reflective access; resolved from the outer class's file descriptor.
public static final com.google.protobuf.Descriptors.Descriptor
    getDescriptor() {
  return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor;
}
// Maps descriptor fields to the generated accessor methods for reflection.
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
    internalGetFieldAccessorTable() {
  return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_fieldAccessorTable
      .ensureFieldAccessorsInitialized(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder.class);
}
// Shared parser used by parseFrom()/mergeFrom().
// NOTE(review): generic type arguments on Parser/AbstractParser restored
// (stripped to raw types when this generated file was copied).
public static com.google.protobuf.Parser<DatanodeInfoProto> PARSER =
    new com.google.protobuf.AbstractParser<DatanodeInfoProto>() {
  public DatanodeInfoProto parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return new DatanodeInfoProto(input, extensionRegistry);
  }
};
// NOTE(review): generic return type restored (was stripped to raw Parser).
@java.lang.Override
public com.google.protobuf.Parser<DatanodeInfoProto> getParserForType() {
  return PARSER;
}
/**
 * Protobuf enum {@code hadoop.hdfs.DatanodeInfoProto.AdminState}
 *
 * Administrative lifecycle state of a datanode.  Each constant carries its
 * descriptor index and its wire value (identical here).
 * NOTE(review): the {@code Internal.EnumLiteMap} generic arguments were
 * stripped when this generated file was copied; restored below.
 */
public enum AdminState
    implements com.google.protobuf.ProtocolMessageEnum {
  /**
   * NORMAL = 0;
   */
  NORMAL(0, 0),
  /**
   * DECOMMISSION_INPROGRESS = 1;
   */
  DECOMMISSION_INPROGRESS(1, 1),
  /**
   * DECOMMISSIONED = 2;
   */
  DECOMMISSIONED(2, 2),
  /**
   * ENTERING_MAINTENANCE = 3;
   */
  ENTERING_MAINTENANCE(3, 3),
  /**
   * IN_MAINTENANCE = 4;
   */
  IN_MAINTENANCE(4, 4),
  ;
  /**
   * NORMAL = 0;
   */
  public static final int NORMAL_VALUE = 0;
  /**
   * DECOMMISSION_INPROGRESS = 1;
   */
  public static final int DECOMMISSION_INPROGRESS_VALUE = 1;
  /**
   * DECOMMISSIONED = 2;
   */
  public static final int DECOMMISSIONED_VALUE = 2;
  /**
   * ENTERING_MAINTENANCE = 3;
   */
  public static final int ENTERING_MAINTENANCE_VALUE = 3;
  /**
   * IN_MAINTENANCE = 4;
   */
  public static final int IN_MAINTENANCE_VALUE = 4;

  /** Returns the wire value of this constant. */
  public final int getNumber() { return value; }

  /** Maps a wire value to its constant, or null if unrecognized. */
  public static AdminState valueOf(int value) {
    switch (value) {
      case 0: return NORMAL;
      case 1: return DECOMMISSION_INPROGRESS;
      case 2: return DECOMMISSIONED;
      case 3: return ENTERING_MAINTENANCE;
      case 4: return IN_MAINTENANCE;
      default: return null;
    }
  }

  public static com.google.protobuf.Internal.EnumLiteMap<AdminState>
      internalGetValueMap() {
    return internalValueMap;
  }
  private static com.google.protobuf.Internal.EnumLiteMap<AdminState>
      internalValueMap =
        new com.google.protobuf.Internal.EnumLiteMap<AdminState>() {
          public AdminState findValueByNumber(int number) {
            return AdminState.valueOf(number);
          }
        };

  public final com.google.protobuf.Descriptors.EnumValueDescriptor
      getValueDescriptor() {
    return getDescriptor().getValues().get(index);
  }
  public final com.google.protobuf.Descriptors.EnumDescriptor
      getDescriptorForType() {
    return getDescriptor();
  }
  public static final com.google.protobuf.Descriptors.EnumDescriptor
      getDescriptor() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDescriptor().getEnumTypes().get(0);
  }

  private static final AdminState[] VALUES = values();

  /** Maps a descriptor value back to its constant. */
  public static AdminState valueOf(
      com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
    if (desc.getType() != getDescriptor()) {
      throw new java.lang.IllegalArgumentException(
          "EnumValueDescriptor is not for this type.");
    }
    return VALUES[desc.getIndex()];
  }

  private final int index;   // position within the enum descriptor
  private final int value;   // value on the wire

  private AdminState(int index, int value) {
    this.index = index;
    this.value = value;
  }

  // @@protoc_insertion_point(enum_scope:hadoop.hdfs.DatanodeInfoProto.AdminState)
}
// Presence bits for optional/required fields (bit i set => field i+1 present).
private int bitField0_;
// required .hadoop.hdfs.DatanodeIDProto id = 1;
public static final int ID_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto id_;
/**
 * required .hadoop.hdfs.DatanodeIDProto id = 1;
 */
public boolean hasId() {
  return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
 * required .hadoop.hdfs.DatanodeIDProto id = 1;
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getId() {
  return id_;
}
/**
 * required .hadoop.hdfs.DatanodeIDProto id = 1;
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getIdOrBuilder() {
  return id_;
}
// Accessors for the scalar usage/statistics fields 2-7.  Each hasX() tests
// the field's presence bit; each getX() returns the stored value (field
// default when absent).
// optional uint64 capacity = 2 [default = 0];
public static final int CAPACITY_FIELD_NUMBER = 2;
private long capacity_;
/**
 * optional uint64 capacity = 2 [default = 0];
 */
public boolean hasCapacity() {
  return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
 * optional uint64 capacity = 2 [default = 0];
 */
public long getCapacity() {
  return capacity_;
}
// optional uint64 dfsUsed = 3 [default = 0];
public static final int DFSUSED_FIELD_NUMBER = 3;
private long dfsUsed_;
/**
 * optional uint64 dfsUsed = 3 [default = 0];
 */
public boolean hasDfsUsed() {
  return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
 * optional uint64 dfsUsed = 3 [default = 0];
 */
public long getDfsUsed() {
  return dfsUsed_;
}
// optional uint64 remaining = 4 [default = 0];
public static final int REMAINING_FIELD_NUMBER = 4;
private long remaining_;
/**
 * optional uint64 remaining = 4 [default = 0];
 */
public boolean hasRemaining() {
  return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
 * optional uint64 remaining = 4 [default = 0];
 */
public long getRemaining() {
  return remaining_;
}
// optional uint64 blockPoolUsed = 5 [default = 0];
public static final int BLOCKPOOLUSED_FIELD_NUMBER = 5;
private long blockPoolUsed_;
/**
 * optional uint64 blockPoolUsed = 5 [default = 0];
 */
public boolean hasBlockPoolUsed() {
  return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
 * optional uint64 blockPoolUsed = 5 [default = 0];
 */
public long getBlockPoolUsed() {
  return blockPoolUsed_;
}
// optional uint64 lastUpdate = 6 [default = 0];
public static final int LASTUPDATE_FIELD_NUMBER = 6;
private long lastUpdate_;
/**
 * optional uint64 lastUpdate = 6 [default = 0];
 */
public boolean hasLastUpdate() {
  return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
 * optional uint64 lastUpdate = 6 [default = 0];
 */
public long getLastUpdate() {
  return lastUpdate_;
}
// optional uint32 xceiverCount = 7 [default = 0];
public static final int XCEIVERCOUNT_FIELD_NUMBER = 7;
private int xceiverCount_;
/**
 * optional uint32 xceiverCount = 7 [default = 0];
 */
public boolean hasXceiverCount() {
  return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
 * optional uint32 xceiverCount = 7 [default = 0];
 */
public int getXceiverCount() {
  return xceiverCount_;
}
// optional string location = 8;
public static final int LOCATION_FIELD_NUMBER = 8;
// Holds either a String or a ByteString; lazily converted and cached on
// first access in either direction.
private java.lang.Object location_;
/**
 * optional string location = 8;
 */
public boolean hasLocation() {
  return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
 * optional string location = 8;
 */
public java.lang.String getLocation() {
  java.lang.Object ref = location_;
  if (ref instanceof java.lang.String) {
    return (java.lang.String) ref;
  } else {
    com.google.protobuf.ByteString bs =
        (com.google.protobuf.ByteString) ref;
    java.lang.String s = bs.toStringUtf8();
    // Cache the decoded string only when the bytes are valid UTF-8, so the
    // original wire bytes are retained for invalid input.
    if (bs.isValidUtf8()) {
      location_ = s;
    }
    return s;
  }
}
/**
 * optional string location = 8;
 */
public com.google.protobuf.ByteString
    getLocationBytes() {
  java.lang.Object ref = location_;
  if (ref instanceof java.lang.String) {
    com.google.protobuf.ByteString b =
        com.google.protobuf.ByteString.copyFromUtf8(
            (java.lang.String) ref);
    location_ = b;
    return b;
  } else {
    return (com.google.protobuf.ByteString) ref;
  }
}
// Accessors for fields 9 and 10.
// optional uint64 nonDfsUsed = 9;
public static final int NONDFSUSED_FIELD_NUMBER = 9;
private long nonDfsUsed_;
/**
 * optional uint64 nonDfsUsed = 9;
 */
public boolean hasNonDfsUsed() {
  return ((bitField0_ & 0x00000100) == 0x00000100);
}
/**
 * optional uint64 nonDfsUsed = 9;
 */
public long getNonDfsUsed() {
  return nonDfsUsed_;
}
// optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];
public static final int ADMINSTATE_FIELD_NUMBER = 10;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState adminState_;
/**
 * optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];
 */
public boolean hasAdminState() {
  return ((bitField0_ & 0x00000200) == 0x00000200);
}
/**
 * optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState getAdminState() {
  return adminState_;
}
// Accessors for fields 11-13.
// optional uint64 cacheCapacity = 11 [default = 0];
public static final int CACHECAPACITY_FIELD_NUMBER = 11;
private long cacheCapacity_;
/**
 * optional uint64 cacheCapacity = 11 [default = 0];
 */
public boolean hasCacheCapacity() {
  return ((bitField0_ & 0x00000400) == 0x00000400);
}
/**
 * optional uint64 cacheCapacity = 11 [default = 0];
 */
public long getCacheCapacity() {
  return cacheCapacity_;
}
// optional uint64 cacheUsed = 12 [default = 0];
public static final int CACHEUSED_FIELD_NUMBER = 12;
private long cacheUsed_;
/**
 * optional uint64 cacheUsed = 12 [default = 0];
 */
public boolean hasCacheUsed() {
  return ((bitField0_ & 0x00000800) == 0x00000800);
}
/**
 * optional uint64 cacheUsed = 12 [default = 0];
 */
public long getCacheUsed() {
  return cacheUsed_;
}
// optional uint64 lastUpdateMonotonic = 13 [default = 0];
public static final int LASTUPDATEMONOTONIC_FIELD_NUMBER = 13;
private long lastUpdateMonotonic_;
/**
 * optional uint64 lastUpdateMonotonic = 13 [default = 0];
 */
public boolean hasLastUpdateMonotonic() {
  return ((bitField0_ & 0x00001000) == 0x00001000);
}
/**
 * optional uint64 lastUpdateMonotonic = 13 [default = 0];
 */
public long getLastUpdateMonotonic() {
  return lastUpdateMonotonic_;
}
// optional string upgradeDomain = 14;
public static final int UPGRADEDOMAIN_FIELD_NUMBER = 14;
// Holds either a String or a ByteString; lazily converted and cached on
// first access in either direction (same scheme as location_).
private java.lang.Object upgradeDomain_;
/**
 * optional string upgradeDomain = 14;
 */
public boolean hasUpgradeDomain() {
  return ((bitField0_ & 0x00002000) == 0x00002000);
}
/**
 * optional string upgradeDomain = 14;
 */
public java.lang.String getUpgradeDomain() {
  java.lang.Object ref = upgradeDomain_;
  if (ref instanceof java.lang.String) {
    return (java.lang.String) ref;
  } else {
    com.google.protobuf.ByteString bs =
        (com.google.protobuf.ByteString) ref;
    java.lang.String s = bs.toStringUtf8();
    // Cache only when the wire bytes decode as valid UTF-8.
    if (bs.isValidUtf8()) {
      upgradeDomain_ = s;
    }
    return s;
  }
}
/**
 * optional string upgradeDomain = 14;
 */
public com.google.protobuf.ByteString
    getUpgradeDomainBytes() {
  java.lang.Object ref = upgradeDomain_;
  if (ref instanceof java.lang.String) {
    com.google.protobuf.ByteString b =
        com.google.protobuf.ByteString.copyFromUtf8(
            (java.lang.String) ref);
    upgradeDomain_ = b;
    return b;
  } else {
    return (com.google.protobuf.ByteString) ref;
  }
}
// Accessors for fields 15-17.
// optional uint64 lastBlockReportTime = 15 [default = 0];
public static final int LASTBLOCKREPORTTIME_FIELD_NUMBER = 15;
private long lastBlockReportTime_;
/**
 * optional uint64 lastBlockReportTime = 15 [default = 0];
 */
public boolean hasLastBlockReportTime() {
  return ((bitField0_ & 0x00004000) == 0x00004000);
}
/**
 * optional uint64 lastBlockReportTime = 15 [default = 0];
 */
public long getLastBlockReportTime() {
  return lastBlockReportTime_;
}
// optional uint64 lastBlockReportMonotonic = 16 [default = 0];
public static final int LASTBLOCKREPORTMONOTONIC_FIELD_NUMBER = 16;
private long lastBlockReportMonotonic_;
/**
 * optional uint64 lastBlockReportMonotonic = 16 [default = 0];
 */
public boolean hasLastBlockReportMonotonic() {
  return ((bitField0_ & 0x00008000) == 0x00008000);
}
/**
 * optional uint64 lastBlockReportMonotonic = 16 [default = 0];
 */
public long getLastBlockReportMonotonic() {
  return lastBlockReportMonotonic_;
}
// optional uint32 numBlocks = 17 [default = 0];
public static final int NUMBLOCKS_FIELD_NUMBER = 17;
private int numBlocks_;
/**
 * optional uint32 numBlocks = 17 [default = 0];
 */
public boolean hasNumBlocks() {
  return ((bitField0_ & 0x00010000) == 0x00010000);
}
/**
 * optional uint32 numBlocks = 17 [default = 0];
 */
public int getNumBlocks() {
  return numBlocks_;
}
// Resets every field to its declared proto default; called by the parsing
// constructor and the default-instance initializer.
private void initFields() {
  id_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance();
  capacity_ = 0L;
  dfsUsed_ = 0L;
  remaining_ = 0L;
  blockPoolUsed_ = 0L;
  lastUpdate_ = 0L;
  xceiverCount_ = 0;
  location_ = "";
  nonDfsUsed_ = 0L;
  adminState_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.NORMAL;
  cacheCapacity_ = 0L;
  cacheUsed_ = 0L;
  lastUpdateMonotonic_ = 0L;
  upgradeDomain_ = "";
  lastBlockReportTime_ = 0L;
  lastBlockReportMonotonic_ = 0L;
  numBlocks_ = 0;
}
// Memoized result: -1 = unknown, 0 = not initialized, 1 = initialized.
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
  byte isInitialized = memoizedIsInitialized;
  if (isInitialized != -1) return isInitialized == 1;
  // The only required field is id; it must be present and itself initialized.
  if (!hasId()) {
    memoizedIsInitialized = 0;
    return false;
  }
  if (!getId().isInitialized()) {
    memoizedIsInitialized = 0;
    return false;
  }
  memoizedIsInitialized = 1;
  return true;
}
// Serializes every present field in field-number order, then the unknown
// fields.  getSerializedSize() is invoked first to populate memoized sizes
// used when writing nested messages.
public void writeTo(com.google.protobuf.CodedOutputStream output)
    throws java.io.IOException {
  getSerializedSize();
  if (((bitField0_ & 0x00000001) == 0x00000001)) {
    output.writeMessage(1, id_);
  }
  if (((bitField0_ & 0x00000002) == 0x00000002)) {
    output.writeUInt64(2, capacity_);
  }
  if (((bitField0_ & 0x00000004) == 0x00000004)) {
    output.writeUInt64(3, dfsUsed_);
  }
  if (((bitField0_ & 0x00000008) == 0x00000008)) {
    output.writeUInt64(4, remaining_);
  }
  if (((bitField0_ & 0x00000010) == 0x00000010)) {
    output.writeUInt64(5, blockPoolUsed_);
  }
  if (((bitField0_ & 0x00000020) == 0x00000020)) {
    output.writeUInt64(6, lastUpdate_);
  }
  if (((bitField0_ & 0x00000040) == 0x00000040)) {
    output.writeUInt32(7, xceiverCount_);
  }
  if (((bitField0_ & 0x00000080) == 0x00000080)) {
    output.writeBytes(8, getLocationBytes());
  }
  if (((bitField0_ & 0x00000100) == 0x00000100)) {
    output.writeUInt64(9, nonDfsUsed_);
  }
  if (((bitField0_ & 0x00000200) == 0x00000200)) {
    output.writeEnum(10, adminState_.getNumber());
  }
  if (((bitField0_ & 0x00000400) == 0x00000400)) {
    output.writeUInt64(11, cacheCapacity_);
  }
  if (((bitField0_ & 0x00000800) == 0x00000800)) {
    output.writeUInt64(12, cacheUsed_);
  }
  if (((bitField0_ & 0x00001000) == 0x00001000)) {
    output.writeUInt64(13, lastUpdateMonotonic_);
  }
  if (((bitField0_ & 0x00002000) == 0x00002000)) {
    output.writeBytes(14, getUpgradeDomainBytes());
  }
  if (((bitField0_ & 0x00004000) == 0x00004000)) {
    output.writeUInt64(15, lastBlockReportTime_);
  }
  if (((bitField0_ & 0x00008000) == 0x00008000)) {
    output.writeUInt64(16, lastBlockReportMonotonic_);
  }
  if (((bitField0_ & 0x00010000) == 0x00010000)) {
    output.writeUInt32(17, numBlocks_);
  }
  getUnknownFields().writeTo(output);
}
// Cached wire size; -1 means "not yet computed".
private int memoizedSerializedSize = -1;
// Computes (and memoizes) the exact number of bytes writeTo() will emit.
// Mirrors writeTo(): only fields with their presence bit set contribute,
// plus the size of any retained unknown fields.
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, id_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, capacity_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, dfsUsed_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(4, remaining_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(5, blockPoolUsed_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(6, lastUpdate_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(7, xceiverCount_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(8, getLocationBytes());
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(9, nonDfsUsed_);
}
if (((bitField0_ & 0x00000200) == 0x00000200)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(10, adminState_.getNumber());
}
if (((bitField0_ & 0x00000400) == 0x00000400)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(11, cacheCapacity_);
}
if (((bitField0_ & 0x00000800) == 0x00000800)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(12, cacheUsed_);
}
if (((bitField0_ & 0x00001000) == 0x00001000)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(13, lastUpdateMonotonic_);
}
if (((bitField0_ & 0x00002000) == 0x00002000)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(14, getUpgradeDomainBytes());
}
if (((bitField0_ & 0x00004000) == 0x00004000)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(15, lastBlockReportTime_);
}
if (((bitField0_ & 0x00008000) == 0x00008000)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(16, lastBlockReportMonotonic_);
}
if (((bitField0_ & 0x00010000) == 0x00010000)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(17, numBlocks_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
// Java serialization hook; delegates to the protobuf runtime so the message
// is serialized via its wire format rather than default Java serialization.
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
// Field-by-field structural equality: two messages are equal iff each field
// has the same presence AND (when present) the same value, and their unknown
// field sets match. Generated protobuf-2.5 style (result-accumulator chain).
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto) obj;
boolean result = true;
result = result && (hasId() == other.hasId());
if (hasId()) {
result = result && getId()
.equals(other.getId());
}
result = result && (hasCapacity() == other.hasCapacity());
if (hasCapacity()) {
result = result && (getCapacity()
== other.getCapacity());
}
result = result && (hasDfsUsed() == other.hasDfsUsed());
if (hasDfsUsed()) {
result = result && (getDfsUsed()
== other.getDfsUsed());
}
result = result && (hasRemaining() == other.hasRemaining());
if (hasRemaining()) {
result = result && (getRemaining()
== other.getRemaining());
}
result = result && (hasBlockPoolUsed() == other.hasBlockPoolUsed());
if (hasBlockPoolUsed()) {
result = result && (getBlockPoolUsed()
== other.getBlockPoolUsed());
}
result = result && (hasLastUpdate() == other.hasLastUpdate());
if (hasLastUpdate()) {
result = result && (getLastUpdate()
== other.getLastUpdate());
}
result = result && (hasXceiverCount() == other.hasXceiverCount());
if (hasXceiverCount()) {
result = result && (getXceiverCount()
== other.getXceiverCount());
}
result = result && (hasLocation() == other.hasLocation());
if (hasLocation()) {
result = result && getLocation()
.equals(other.getLocation());
}
result = result && (hasNonDfsUsed() == other.hasNonDfsUsed());
if (hasNonDfsUsed()) {
result = result && (getNonDfsUsed()
== other.getNonDfsUsed());
}
result = result && (hasAdminState() == other.hasAdminState());
if (hasAdminState()) {
result = result &&
(getAdminState() == other.getAdminState());
}
result = result && (hasCacheCapacity() == other.hasCacheCapacity());
if (hasCacheCapacity()) {
result = result && (getCacheCapacity()
== other.getCacheCapacity());
}
result = result && (hasCacheUsed() == other.hasCacheUsed());
if (hasCacheUsed()) {
result = result && (getCacheUsed()
== other.getCacheUsed());
}
result = result && (hasLastUpdateMonotonic() == other.hasLastUpdateMonotonic());
if (hasLastUpdateMonotonic()) {
result = result && (getLastUpdateMonotonic()
== other.getLastUpdateMonotonic());
}
result = result && (hasUpgradeDomain() == other.hasUpgradeDomain());
if (hasUpgradeDomain()) {
result = result && getUpgradeDomain()
.equals(other.getUpgradeDomain());
}
result = result && (hasLastBlockReportTime() == other.hasLastBlockReportTime());
if (hasLastBlockReportTime()) {
result = result && (getLastBlockReportTime()
== other.getLastBlockReportTime());
}
result = result && (hasLastBlockReportMonotonic() == other.hasLastBlockReportMonotonic());
if (hasLastBlockReportMonotonic()) {
result = result && (getLastBlockReportMonotonic()
== other.getLastBlockReportMonotonic());
}
result = result && (hasNumBlocks() == other.hasNumBlocks());
if (hasNumBlocks()) {
result = result && (getNumBlocks()
== other.getNumBlocks());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
// Cached hash; 0 means "not yet computed" (a legitimately-zero hash is
// simply recomputed each call, which is harmless).
private int memoizedHashCode = 0;
// Hash consistent with equals(): mixes each present field's number and value
// using the generated 37/53 multiplier scheme. hashLong/hashEnum are
// inherited protobuf runtime helpers.
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasId()) {
hash = (37 * hash) + ID_FIELD_NUMBER;
hash = (53 * hash) + getId().hashCode();
}
if (hasCapacity()) {
hash = (37 * hash) + CAPACITY_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getCapacity());
}
if (hasDfsUsed()) {
hash = (37 * hash) + DFSUSED_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getDfsUsed());
}
if (hasRemaining()) {
hash = (37 * hash) + REMAINING_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getRemaining());
}
if (hasBlockPoolUsed()) {
hash = (37 * hash) + BLOCKPOOLUSED_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getBlockPoolUsed());
}
if (hasLastUpdate()) {
hash = (37 * hash) + LASTUPDATE_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getLastUpdate());
}
if (hasXceiverCount()) {
hash = (37 * hash) + XCEIVERCOUNT_FIELD_NUMBER;
hash = (53 * hash) + getXceiverCount();
}
if (hasLocation()) {
hash = (37 * hash) + LOCATION_FIELD_NUMBER;
hash = (53 * hash) + getLocation().hashCode();
}
if (hasNonDfsUsed()) {
hash = (37 * hash) + NONDFSUSED_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getNonDfsUsed());
}
if (hasAdminState()) {
hash = (37 * hash) + ADMINSTATE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getAdminState());
}
if (hasCacheCapacity()) {
hash = (37 * hash) + CACHECAPACITY_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getCacheCapacity());
}
if (hasCacheUsed()) {
hash = (37 * hash) + CACHEUSED_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getCacheUsed());
}
if (hasLastUpdateMonotonic()) {
hash = (37 * hash) + LASTUPDATEMONOTONIC_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getLastUpdateMonotonic());
}
if (hasUpgradeDomain()) {
hash = (37 * hash) + UPGRADEDOMAIN_FIELD_NUMBER;
hash = (53 * hash) + getUpgradeDomain().hashCode();
}
if (hasLastBlockReportTime()) {
hash = (37 * hash) + LASTBLOCKREPORTTIME_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getLastBlockReportTime());
}
if (hasLastBlockReportMonotonic()) {
hash = (37 * hash) + LASTBLOCKREPORTMONOTONIC_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getLastBlockReportMonotonic());
}
if (hasNumBlocks()) {
hash = (37 * hash) + NUMBLOCKS_FIELD_NUMBER;
hash = (53 * hash) + getNumBlocks();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// Static parse entry points: thin delegations to PARSER for every supported
// input source (ByteString, byte[], InputStream, CodedInputStream), each with
// and without an ExtensionRegistryLite. parseDelimitedFrom expects a varint
// length prefix before the message bytes.
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Builder factory methods: a fresh builder, a builder pre-populated from an
// existing message (prototype / toBuilder), and the parented variant used
// internally when this message is a sub-builder of another message.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.DatanodeInfoProto}
*
*
**
* The status of a Datanode
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder {
// Returns the proto descriptor for hadoop.hdfs.DatanodeInfoProto.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor;
}
// Supplies the reflection-based field accessor table tying the descriptor to
// the generated message and builder classes.
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder()
// Private constructors: standalone builder, and the parented form used when
// this builder is nested inside another builder's field.
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
// Eagerly creates the nested-message field builder(s) when the runtime is
// configured to always use field builders (descriptor-based messages).
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getIdFieldBuilder();
}
}
// Internal factory used by DatanodeInfoProto.newBuilder().
private static Builder create() {
return new Builder();
}
// Resets every field to its proto default and clears all presence bits in
// bitField0_ (one bit per field, 0x00000001..0x00010000 for fields 1..17).
public Builder clear() {
super.clear();
if (idBuilder_ == null) {
id_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance();
} else {
idBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
capacity_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
dfsUsed_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
remaining_ = 0L;
bitField0_ = (bitField0_ & ~0x00000008);
blockPoolUsed_ = 0L;
bitField0_ = (bitField0_ & ~0x00000010);
lastUpdate_ = 0L;
bitField0_ = (bitField0_ & ~0x00000020);
xceiverCount_ = 0;
bitField0_ = (bitField0_ & ~0x00000040);
location_ = "";
bitField0_ = (bitField0_ & ~0x00000080);
nonDfsUsed_ = 0L;
bitField0_ = (bitField0_ & ~0x00000100);
adminState_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.NORMAL;
bitField0_ = (bitField0_ & ~0x00000200);
cacheCapacity_ = 0L;
bitField0_ = (bitField0_ & ~0x00000400);
cacheUsed_ = 0L;
bitField0_ = (bitField0_ & ~0x00000800);
lastUpdateMonotonic_ = 0L;
bitField0_ = (bitField0_ & ~0x00001000);
upgradeDomain_ = "";
bitField0_ = (bitField0_ & ~0x00002000);
lastBlockReportTime_ = 0L;
bitField0_ = (bitField0_ & ~0x00004000);
lastBlockReportMonotonic_ = 0L;
bitField0_ = (bitField0_ & ~0x00008000);
numBlocks_ = 0;
bitField0_ = (bitField0_ & ~0x00010000);
return this;
}
// Deep copy: snapshots current state via buildPartial() into a new builder.
public Builder clone() {
return create().mergeFrom(buildPartial());
}
// Instance-level descriptor and default-instance accessors required by the
// Message.Builder contract.
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance();
}
// Builds the message, throwing UninitializedMessageException if the required
// 'id' field (or its required sub-fields) is missing; use buildPartial() to
// skip that check.
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
// Copies the builder's fields into a new message without checking required
// fields. Presence bits are translated from the builder's bitField0_ to the
// message's; field values are copied unconditionally (absent fields just
// carry their defaults, masked out by the presence bits).
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
// 'id' comes from the nested builder when one exists, else the cached message.
if (idBuilder_ == null) {
result.id_ = id_;
} else {
result.id_ = idBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.capacity_ = capacity_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.dfsUsed_ = dfsUsed_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.remaining_ = remaining_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.blockPoolUsed_ = blockPoolUsed_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000020;
}
result.lastUpdate_ = lastUpdate_;
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000040;
}
result.xceiverCount_ = xceiverCount_;
if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
to_bitField0_ |= 0x00000080;
}
result.location_ = location_;
if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
to_bitField0_ |= 0x00000100;
}
result.nonDfsUsed_ = nonDfsUsed_;
if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
to_bitField0_ |= 0x00000200;
}
result.adminState_ = adminState_;
if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
to_bitField0_ |= 0x00000400;
}
result.cacheCapacity_ = cacheCapacity_;
if (((from_bitField0_ & 0x00000800) == 0x00000800)) {
to_bitField0_ |= 0x00000800;
}
result.cacheUsed_ = cacheUsed_;
if (((from_bitField0_ & 0x00001000) == 0x00001000)) {
to_bitField0_ |= 0x00001000;
}
result.lastUpdateMonotonic_ = lastUpdateMonotonic_;
if (((from_bitField0_ & 0x00002000) == 0x00002000)) {
to_bitField0_ |= 0x00002000;
}
result.upgradeDomain_ = upgradeDomain_;
if (((from_bitField0_ & 0x00004000) == 0x00004000)) {
to_bitField0_ |= 0x00004000;
}
result.lastBlockReportTime_ = lastBlockReportTime_;
if (((from_bitField0_ & 0x00008000) == 0x00008000)) {
to_bitField0_ |= 0x00008000;
}
result.lastBlockReportMonotonic_ = lastBlockReportMonotonic_;
if (((from_bitField0_ & 0x00010000) == 0x00010000)) {
to_bitField0_ |= 0x00010000;
}
result.numBlocks_ = numBlocks_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
// Generic Message merge: dispatches to the typed overload when possible,
// otherwise falls back to reflection-based merging in the superclass.
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
// Typed merge: for each field present in 'other', overwrite this builder's
// value ('id' is recursively merged). String fields are copied directly (the
// Object may still be a lazily-decoded ByteString) to avoid forcing UTF-8
// decoding. Unknown fields are merged last.
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()) return this;
if (other.hasId()) {
mergeId(other.getId());
}
if (other.hasCapacity()) {
setCapacity(other.getCapacity());
}
if (other.hasDfsUsed()) {
setDfsUsed(other.getDfsUsed());
}
if (other.hasRemaining()) {
setRemaining(other.getRemaining());
}
if (other.hasBlockPoolUsed()) {
setBlockPoolUsed(other.getBlockPoolUsed());
}
if (other.hasLastUpdate()) {
setLastUpdate(other.getLastUpdate());
}
if (other.hasXceiverCount()) {
setXceiverCount(other.getXceiverCount());
}
if (other.hasLocation()) {
bitField0_ |= 0x00000080;
location_ = other.location_;
onChanged();
}
if (other.hasNonDfsUsed()) {
setNonDfsUsed(other.getNonDfsUsed());
}
if (other.hasAdminState()) {
setAdminState(other.getAdminState());
}
if (other.hasCacheCapacity()) {
setCacheCapacity(other.getCacheCapacity());
}
if (other.hasCacheUsed()) {
setCacheUsed(other.getCacheUsed());
}
if (other.hasLastUpdateMonotonic()) {
setLastUpdateMonotonic(other.getLastUpdateMonotonic());
}
if (other.hasUpgradeDomain()) {
bitField0_ |= 0x00002000;
upgradeDomain_ = other.upgradeDomain_;
onChanged();
}
if (other.hasLastBlockReportTime()) {
setLastBlockReportTime(other.getLastBlockReportTime());
}
if (other.hasLastBlockReportMonotonic()) {
setLastBlockReportMonotonic(other.getLastBlockReportMonotonic());
}
if (other.hasNumBlocks()) {
setNumBlocks(other.getNumBlocks());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
// A builder is initialized iff the required 'id' field is set and is itself
// fully initialized (DatanodeIDProto has required fields of its own).
public final boolean isInitialized() {
if (!hasId()) {
return false;
}
if (!getId().isInitialized()) {
return false;
}
return true;
}
// Parses a message from the stream and merges it into this builder. On a
// parse failure the partially-parsed message is still merged (finally block)
// before the exception is rethrown, matching protobuf merge semantics.
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// Presence bits for fields 1..17 (bit 0x00000001 = 'id', ... 0x00010000 = 'numBlocks').
private int bitField0_;
// required .hadoop.hdfs.DatanodeIDProto id = 1;
// Nested-message field: either the plain cached value (id_) or, once
// getIdFieldBuilder() has been called, a SingleFieldBuilder that tracks
// changes and propagates onChanged() to the parent.
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto id_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> idBuilder_;
/**
* required .hadoop.hdfs.DatanodeIDProto id = 1;
*/
public boolean hasId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.DatanodeIDProto id = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getId() {
if (idBuilder_ == null) {
return id_;
} else {
return idBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.DatanodeIDProto id = 1;
*/
public Builder setId(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
if (idBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
id_ = value;
onChanged();
} else {
idBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.DatanodeIDProto id = 1;
*/
public Builder setId(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
if (idBuilder_ == null) {
id_ = builderForValue.build();
onChanged();
} else {
idBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.DatanodeIDProto id = 1;
*/
public Builder mergeId(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
if (idBuilder_ == null) {
// Merge with any existing value; if none (or still the default), replace.
if (((bitField0_ & 0x00000001) == 0x00000001) &&
id_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()) {
id_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder(id_).mergeFrom(value).buildPartial();
} else {
id_ = value;
}
onChanged();
} else {
idBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.DatanodeIDProto id = 1;
*/
public Builder clearId() {
if (idBuilder_ == null) {
id_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance();
onChanged();
} else {
idBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.DatanodeIDProto id = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder getIdBuilder() {
// Marks the field present: handing out a mutable builder implies a write.
bitField0_ |= 0x00000001;
onChanged();
return getIdFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.DatanodeIDProto id = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getIdOrBuilder() {
if (idBuilder_ != null) {
return idBuilder_.getMessageOrBuilder();
} else {
return id_;
}
}
/**
* required .hadoop.hdfs.DatanodeIDProto id = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>
getIdFieldBuilder() {
// Lazily switches the field to builder-backed mode; id_ is nulled because
// the SingleFieldBuilder takes ownership of the current value.
if (idBuilder_ == null) {
idBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>(
id_,
getParentForChildren(),
isClean());
id_ = null;
}
return idBuilder_;
}
// optional uint64 capacity = 2 [default = 0];
// Builder accessors for 'capacity' (field 2, presence bit 0x00000002).
private long capacity_ ;
/**
* optional uint64 capacity = 2 [default = 0];
*/
public boolean hasCapacity() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional uint64 capacity = 2 [default = 0];
*/
public long getCapacity() {
return capacity_;
}
/**
* optional uint64 capacity = 2 [default = 0];
*/
public Builder setCapacity(long value) {
bitField0_ |= 0x00000002;
capacity_ = value;
onChanged();
return this;
}
/**
* optional uint64 capacity = 2 [default = 0];
*/
public Builder clearCapacity() {
bitField0_ = (bitField0_ & ~0x00000002);
capacity_ = 0L;
onChanged();
return this;
}
// optional uint64 dfsUsed = 3 [default = 0];
// Builder accessors for 'dfsUsed' (field 3, presence bit 0x00000004).
private long dfsUsed_ ;
/**
* optional uint64 dfsUsed = 3 [default = 0];
*/
public boolean hasDfsUsed() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional uint64 dfsUsed = 3 [default = 0];
*/
public long getDfsUsed() {
return dfsUsed_;
}
/**
* optional uint64 dfsUsed = 3 [default = 0];
*/
public Builder setDfsUsed(long value) {
bitField0_ |= 0x00000004;
dfsUsed_ = value;
onChanged();
return this;
}
/**
* optional uint64 dfsUsed = 3 [default = 0];
*/
public Builder clearDfsUsed() {
bitField0_ = (bitField0_ & ~0x00000004);
dfsUsed_ = 0L;
onChanged();
return this;
}
// optional uint64 remaining = 4 [default = 0];
// Builder accessors for 'remaining' (field 4, presence bit 0x00000008).
private long remaining_ ;
/**
* optional uint64 remaining = 4 [default = 0];
*/
public boolean hasRemaining() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional uint64 remaining = 4 [default = 0];
*/
public long getRemaining() {
return remaining_;
}
/**
* optional uint64 remaining = 4 [default = 0];
*/
public Builder setRemaining(long value) {
bitField0_ |= 0x00000008;
remaining_ = value;
onChanged();
return this;
}
/**
* optional uint64 remaining = 4 [default = 0];
*/
public Builder clearRemaining() {
bitField0_ = (bitField0_ & ~0x00000008);
remaining_ = 0L;
onChanged();
return this;
}
// optional uint64 blockPoolUsed = 5 [default = 0];
// Builder accessors for 'blockPoolUsed' (field 5, presence bit 0x00000010).
private long blockPoolUsed_ ;
/**
* optional uint64 blockPoolUsed = 5 [default = 0];
*/
public boolean hasBlockPoolUsed() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional uint64 blockPoolUsed = 5 [default = 0];
*/
public long getBlockPoolUsed() {
return blockPoolUsed_;
}
/**
* optional uint64 blockPoolUsed = 5 [default = 0];
*/
public Builder setBlockPoolUsed(long value) {
bitField0_ |= 0x00000010;
blockPoolUsed_ = value;
onChanged();
return this;
}
/**
* optional uint64 blockPoolUsed = 5 [default = 0];
*/
public Builder clearBlockPoolUsed() {
bitField0_ = (bitField0_ & ~0x00000010);
blockPoolUsed_ = 0L;
onChanged();
return this;
}
// optional uint64 lastUpdate = 6 [default = 0];
// Builder accessors for 'lastUpdate' (field 6, presence bit 0x00000020).
private long lastUpdate_ ;
/**
* optional uint64 lastUpdate = 6 [default = 0];
*/
public boolean hasLastUpdate() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional uint64 lastUpdate = 6 [default = 0];
*/
public long getLastUpdate() {
return lastUpdate_;
}
/**
* optional uint64 lastUpdate = 6 [default = 0];
*/
public Builder setLastUpdate(long value) {
bitField0_ |= 0x00000020;
lastUpdate_ = value;
onChanged();
return this;
}
/**
* optional uint64 lastUpdate = 6 [default = 0];
*/
public Builder clearLastUpdate() {
bitField0_ = (bitField0_ & ~0x00000020);
lastUpdate_ = 0L;
onChanged();
return this;
}
// optional uint32 xceiverCount = 7 [default = 0];
// Builder accessors for 'xceiverCount' (field 7, presence bit 0x00000040).
private int xceiverCount_ ;
/**
* optional uint32 xceiverCount = 7 [default = 0];
*/
public boolean hasXceiverCount() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional uint32 xceiverCount = 7 [default = 0];
*/
public int getXceiverCount() {
return xceiverCount_;
}
/**
* optional uint32 xceiverCount = 7 [default = 0];
*/
public Builder setXceiverCount(int value) {
bitField0_ |= 0x00000040;
xceiverCount_ = value;
onChanged();
return this;
}
/**
* optional uint32 xceiverCount = 7 [default = 0];
*/
public Builder clearXceiverCount() {
bitField0_ = (bitField0_ & ~0x00000040);
xceiverCount_ = 0;
onChanged();
return this;
}
// optional string location = 8;
// Builder accessors for 'location' (field 8, presence bit 0x00000080).
// Stored as Object: either a decoded String or a lazily-decoded ByteString;
// the getters convert and cache in whichever direction is requested.
private java.lang.Object location_ = "";
/**
* optional string location = 8;
*/
public boolean hasLocation() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* optional string location = 8;
*/
public java.lang.String getLocation() {
java.lang.Object ref = location_;
if (!(ref instanceof java.lang.String)) {
// Decode the raw bytes once and cache the String form.
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
location_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string location = 8;
*/
public com.google.protobuf.ByteString
getLocationBytes() {
java.lang.Object ref = location_;
if (ref instanceof String) {
// Encode once and cache the ByteString form.
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
location_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* optional string location = 8;
*/
public Builder setLocation(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000080;
location_ = value;
onChanged();
return this;
}
/**
* optional string location = 8;
*/
public Builder clearLocation() {
bitField0_ = (bitField0_ & ~0x00000080);
location_ = getDefaultInstance().getLocation();
onChanged();
return this;
}
/**
* optional string location = 8;
*/
public Builder setLocationBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000080;
location_ = value;
onChanged();
return this;
}
// optional uint64 nonDfsUsed = 9;
// Builder accessors for 'nonDfsUsed' (field 9, presence bit 0x00000100).
private long nonDfsUsed_ ;
/**
* optional uint64 nonDfsUsed = 9;
*/
public boolean hasNonDfsUsed() {
return ((bitField0_ & 0x00000100) == 0x00000100);
}
/**
* optional uint64 nonDfsUsed = 9;
*/
public long getNonDfsUsed() {
return nonDfsUsed_;
}
/**
* optional uint64 nonDfsUsed = 9;
*/
public Builder setNonDfsUsed(long value) {
bitField0_ |= 0x00000100;
nonDfsUsed_ = value;
onChanged();
return this;
}
/**
* optional uint64 nonDfsUsed = 9;
*/
public Builder clearNonDfsUsed() {
bitField0_ = (bitField0_ & ~0x00000100);
nonDfsUsed_ = 0L;
onChanged();
return this;
}
// optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];
// Builder accessors for 'adminState' (field 10, presence bit 0x00000200);
// proto default is NORMAL.
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState adminState_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.NORMAL;
/**
* optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];
*/
public boolean hasAdminState() {
return ((bitField0_ & 0x00000200) == 0x00000200);
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState getAdminState() {
return adminState_;
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];
*/
public Builder setAdminState(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000200;
adminState_ = value;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];
*/
public Builder clearAdminState() {
bitField0_ = (bitField0_ & ~0x00000200);
adminState_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.NORMAL;
onChanged();
return this;
}
// optional uint64 cacheCapacity = 11 [default = 0];
// Builder accessors for 'cacheCapacity' (field 11, presence bit 0x00000400).
private long cacheCapacity_ ;
/**
* optional uint64 cacheCapacity = 11 [default = 0];
*/
public boolean hasCacheCapacity() {
return ((bitField0_ & 0x00000400) == 0x00000400);
}
/**
* optional uint64 cacheCapacity = 11 [default = 0];
*/
public long getCacheCapacity() {
return cacheCapacity_;
}
/**
* optional uint64 cacheCapacity = 11 [default = 0];
*/
public Builder setCacheCapacity(long value) {
bitField0_ |= 0x00000400;
cacheCapacity_ = value;
onChanged();
return this;
}
/**
* optional uint64 cacheCapacity = 11 [default = 0];
*/
public Builder clearCacheCapacity() {
bitField0_ = (bitField0_ & ~0x00000400);
cacheCapacity_ = 0L;
onChanged();
return this;
}
// optional uint64 cacheUsed = 12 [default = 0];
// Builder accessors for 'cacheUsed' (field 12, presence bit 0x00000800).
private long cacheUsed_ ;
/**
* optional uint64 cacheUsed = 12 [default = 0];
*/
public boolean hasCacheUsed() {
return ((bitField0_ & 0x00000800) == 0x00000800);
}
/**
* optional uint64 cacheUsed = 12 [default = 0];
*/
public long getCacheUsed() {
return cacheUsed_;
}
/**
* optional uint64 cacheUsed = 12 [default = 0];
*/
public Builder setCacheUsed(long value) {
bitField0_ |= 0x00000800;
cacheUsed_ = value;
onChanged();
return this;
}
/**
* optional uint64 cacheUsed = 12 [default = 0];
*/
public Builder clearCacheUsed() {
bitField0_ = (bitField0_ & ~0x00000800);
cacheUsed_ = 0L;
onChanged();
return this;
}
// optional uint64 lastUpdateMonotonic = 13 [default = 0];
// Last-update timestamp on a monotonic clock (name suggests it is not
// wall-clock time — confirm against the producer of this message).
// Presence bit: 0x00001000 of bitField0_.
private long lastUpdateMonotonic_ ;
/**
 * optional uint64 lastUpdateMonotonic = 13 [default = 0];
 */
public boolean hasLastUpdateMonotonic() {
  return ((bitField0_ & 0x00001000) == 0x00001000);
}
/**
 * optional uint64 lastUpdateMonotonic = 13 [default = 0];
 */
public long getLastUpdateMonotonic() {
  return lastUpdateMonotonic_;
}
/**
 * optional uint64 lastUpdateMonotonic = 13 [default = 0];
 */
public Builder setLastUpdateMonotonic(long value) {
  bitField0_ |= 0x00001000;
  lastUpdateMonotonic_ = value;
  onChanged();
  return this;
}
/**
 * optional uint64 lastUpdateMonotonic = 13 [default = 0];
 *
 * Resets the field to its proto default (0) and clears the presence bit.
 */
public Builder clearLastUpdateMonotonic() {
  bitField0_ = (bitField0_ & ~0x00001000);
  lastUpdateMonotonic_ = 0L;
  onChanged();
  return this;
}
// optional string upgradeDomain = 14;
// Stored as Object so it can lazily flip between String and ByteString:
// the wire representation is bytes, and each getter caches the converted
// form to avoid repeated UTF-8 encode/decode.  Presence bit: 0x00002000.
private java.lang.Object upgradeDomain_ = "";
/**
 * optional string upgradeDomain = 14;
 */
public boolean hasUpgradeDomain() {
  return ((bitField0_ & 0x00002000) == 0x00002000);
}
/**
 * optional string upgradeDomain = 14;
 *
 * Returns the field as a String, decoding from the cached ByteString if
 * necessary and caching the decoded form.
 */
public java.lang.String getUpgradeDomain() {
  java.lang.Object ref = upgradeDomain_;
  if (!(ref instanceof java.lang.String)) {
    java.lang.String s = ((com.google.protobuf.ByteString) ref)
        .toStringUtf8();
    upgradeDomain_ = s;
    return s;
  } else {
    return (java.lang.String) ref;
  }
}
/**
 * optional string upgradeDomain = 14;
 *
 * Returns the field as a UTF-8 ByteString, encoding from the cached
 * String if necessary and caching the encoded form.
 */
public com.google.protobuf.ByteString
    getUpgradeDomainBytes() {
  java.lang.Object ref = upgradeDomain_;
  if (ref instanceof String) {
    com.google.protobuf.ByteString b =
        com.google.protobuf.ByteString.copyFromUtf8(
            (java.lang.String) ref);
    upgradeDomain_ = b;
    return b;
  } else {
    return (com.google.protobuf.ByteString) ref;
  }
}
/**
 * optional string upgradeDomain = 14;
 */
public Builder setUpgradeDomain(
    java.lang.String value) {
  if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00002000;
  upgradeDomain_ = value;
  onChanged();
  return this;
}
/**
 * optional string upgradeDomain = 14;
 *
 * Resets the field to the default instance's value ("") and clears the
 * presence bit.
 */
public Builder clearUpgradeDomain() {
  bitField0_ = (bitField0_ & ~0x00002000);
  upgradeDomain_ = getDefaultInstance().getUpgradeDomain();
  onChanged();
  return this;
}
/**
 * optional string upgradeDomain = 14;
 *
 * Raw-bytes setter; the value is not validated as UTF-8 here.
 */
public Builder setUpgradeDomainBytes(
    com.google.protobuf.ByteString value) {
  if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00002000;
  upgradeDomain_ = value;
  onChanged();
  return this;
}
// optional uint64 lastBlockReportTime = 15 [default = 0];
// Time of the datanode's last block report (epoch semantics not visible
// here — confirm with the producer).  Presence bit: 0x00004000.
private long lastBlockReportTime_ ;
/**
 * optional uint64 lastBlockReportTime = 15 [default = 0];
 */
public boolean hasLastBlockReportTime() {
  return ((bitField0_ & 0x00004000) == 0x00004000);
}
/**
 * optional uint64 lastBlockReportTime = 15 [default = 0];
 */
public long getLastBlockReportTime() {
  return lastBlockReportTime_;
}
/**
 * optional uint64 lastBlockReportTime = 15 [default = 0];
 */
public Builder setLastBlockReportTime(long value) {
  bitField0_ |= 0x00004000;
  lastBlockReportTime_ = value;
  onChanged();
  return this;
}
/**
 * optional uint64 lastBlockReportTime = 15 [default = 0];
 *
 * Resets the field to its proto default (0) and clears the presence bit.
 */
public Builder clearLastBlockReportTime() {
  bitField0_ = (bitField0_ & ~0x00004000);
  lastBlockReportTime_ = 0L;
  onChanged();
  return this;
}
// optional uint64 lastBlockReportMonotonic = 16 [default = 0];
// Monotonic-clock counterpart of lastBlockReportTime.
// Presence bit: 0x00008000 of bitField0_.
private long lastBlockReportMonotonic_ ;
/**
 * optional uint64 lastBlockReportMonotonic = 16 [default = 0];
 */
public boolean hasLastBlockReportMonotonic() {
  return ((bitField0_ & 0x00008000) == 0x00008000);
}
/**
 * optional uint64 lastBlockReportMonotonic = 16 [default = 0];
 */
public long getLastBlockReportMonotonic() {
  return lastBlockReportMonotonic_;
}
/**
 * optional uint64 lastBlockReportMonotonic = 16 [default = 0];
 */
public Builder setLastBlockReportMonotonic(long value) {
  bitField0_ |= 0x00008000;
  lastBlockReportMonotonic_ = value;
  onChanged();
  return this;
}
/**
 * optional uint64 lastBlockReportMonotonic = 16 [default = 0];
 *
 * Resets the field to its proto default (0) and clears the presence bit.
 */
public Builder clearLastBlockReportMonotonic() {
  bitField0_ = (bitField0_ & ~0x00008000);
  lastBlockReportMonotonic_ = 0L;
  onChanged();
  return this;
}
// optional uint32 numBlocks = 17 [default = 0];
// Number of blocks on the datanode; uint32 on the wire, stored in a
// Java int (values above Integer.MAX_VALUE would appear negative).
// Presence bit: 0x00010000 of bitField0_.
private int numBlocks_ ;
/**
 * optional uint32 numBlocks = 17 [default = 0];
 */
public boolean hasNumBlocks() {
  return ((bitField0_ & 0x00010000) == 0x00010000);
}
/**
 * optional uint32 numBlocks = 17 [default = 0];
 */
public int getNumBlocks() {
  return numBlocks_;
}
/**
 * optional uint32 numBlocks = 17 [default = 0];
 */
public Builder setNumBlocks(int value) {
  bitField0_ |= 0x00010000;
  numBlocks_ = value;
  onChanged();
  return this;
}
/**
 * optional uint32 numBlocks = 17 [default = 0];
 *
 * Resets the field to its proto default (0) and clears the presence bit.
 */
public Builder clearNumBlocks() {
  bitField0_ = (bitField0_ & ~0x00010000);
  numBlocks_ = 0;
  onChanged();
  return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeInfoProto)
}
// Eagerly build the shared immutable default instance when the class
// loads, then populate it with the proto-declared default field values.
static {
  defaultInstance = new DatanodeInfoProto(true);
  defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeInfoProto)
}
/**
 * Read-only accessor view of a {@code hadoop.hdfs.DatanodeStorageProto},
 * implemented by both the immutable message and its Builder.
 * Generated by the protocol buffer compiler.
 */
public interface DatanodeStorageProtoOrBuilder
    extends com.google.protobuf.MessageOrBuilder {
  // required string storageUuid = 1;
  /**
   * required string storageUuid = 1;
   */
  boolean hasStorageUuid();
  /**
   * required string storageUuid = 1;
   */
  java.lang.String getStorageUuid();
  /**
   * required string storageUuid = 1;
   *
   * Raw UTF-8 bytes of storageUuid.
   */
  com.google.protobuf.ByteString
      getStorageUuidBytes();
  // optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];
  /**
   * optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];
   */
  boolean hasState();
  /**
   * optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];
   */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState getState();
  // optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];
  /**
   * optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];
   */
  boolean hasStorageType();
  /**
   * optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];
   */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType();
}
/**
* Protobuf type {@code hadoop.hdfs.DatanodeStorageProto}
*
*
**
* Represents a storage available on the datanode
*
*/
public static final class DatanodeStorageProto extends
com.google.protobuf.GeneratedMessage
implements DatanodeStorageProtoOrBuilder {
// Use DatanodeStorageProto.newBuilder() to construct.
private DatanodeStorageProto(com.google.protobuf.GeneratedMessage.Builder> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private DatanodeStorageProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final DatanodeStorageProto defaultInstance;
public static DatanodeStorageProto getDefaultInstance() {
return defaultInstance;
}
public DatanodeStorageProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DatanodeStorageProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
storageUuid_ = input.readBytes();
break;
}
case 16: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(2, rawValue);
} else {
bitField0_ |= 0x00000002;
state_ = value;
}
break;
}
case 24: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(3, rawValue);
} else {
bitField0_ |= 0x00000004;
storageType_ = value;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder.class);
}
public static com.google.protobuf.Parser PARSER =
new com.google.protobuf.AbstractParser() {
public DatanodeStorageProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new DatanodeStorageProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser getParserForType() {
return PARSER;
}
/**
* Protobuf enum {@code hadoop.hdfs.DatanodeStorageProto.StorageState}
*/
public enum StorageState
implements com.google.protobuf.ProtocolMessageEnum {
/**
* NORMAL = 0;
*/
NORMAL(0, 0),
/**
* READ_ONLY_SHARED = 1;
*/
READ_ONLY_SHARED(1, 1),
;
/**
* NORMAL = 0;
*/
public static final int NORMAL_VALUE = 0;
/**
* READ_ONLY_SHARED = 1;
*/
public static final int READ_ONLY_SHARED_VALUE = 1;
public final int getNumber() { return value; }
public static StorageState valueOf(int value) {
switch (value) {
case 0: return NORMAL;
case 1: return READ_ONLY_SHARED;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap() {
public StorageState findValueByNumber(int number) {
return StorageState.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDescriptor().getEnumTypes().get(0);
}
private static final StorageState[] VALUES = values();
public static StorageState valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private StorageState(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.DatanodeStorageProto.StorageState)
}
private int bitField0_;
// required string storageUuid = 1;
public static final int STORAGEUUID_FIELD_NUMBER = 1;
private java.lang.Object storageUuid_;
/**
* required string storageUuid = 1;
*/
public boolean hasStorageUuid() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string storageUuid = 1;
*/
public java.lang.String getStorageUuid() {
java.lang.Object ref = storageUuid_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
storageUuid_ = s;
}
return s;
}
}
/**
* required string storageUuid = 1;
*/
public com.google.protobuf.ByteString
getStorageUuidBytes() {
java.lang.Object ref = storageUuid_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
storageUuid_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];
public static final int STATE_FIELD_NUMBER = 2;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState state_;
/**
* optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];
*/
public boolean hasState() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState getState() {
return state_;
}
// optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];
public static final int STORAGETYPE_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto storageType_;
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];
*/
public boolean hasStorageType() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
return storageType_;
}
private void initFields() {
storageUuid_ = "";
state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.NORMAL;
storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasStorageUuid()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getStorageUuidBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeEnum(2, state_.getNumber());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeEnum(3, storageType_.getNumber());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getStorageUuidBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(2, state_.getNumber());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(3, storageType_.getNumber());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto) obj;
boolean result = true;
result = result && (hasStorageUuid() == other.hasStorageUuid());
if (hasStorageUuid()) {
result = result && getStorageUuid()
.equals(other.getStorageUuid());
}
result = result && (hasState() == other.hasState());
if (hasState()) {
result = result &&
(getState() == other.getState());
}
result = result && (hasStorageType() == other.hasStorageType());
if (hasStorageType()) {
result = result &&
(getStorageType() == other.getStorageType());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasStorageUuid()) {
hash = (37 * hash) + STORAGEUUID_FIELD_NUMBER;
hash = (53 * hash) + getStorageUuid().hashCode();
}
if (hasState()) {
hash = (37 * hash) + STATE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getState());
}
if (hasStorageType()) {
hash = (37 * hash) + STORAGETYPE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getStorageType());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.DatanodeStorageProto}
*
*
**
* Represents a storage available on the datanode
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
storageUuid_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.NORMAL;
bitField0_ = (bitField0_ & ~0x00000002);
storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.storageUuid_ = storageUuid_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.state_ = state_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.storageType_ = storageType_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance()) return this;
if (other.hasStorageUuid()) {
bitField0_ |= 0x00000001;
storageUuid_ = other.storageUuid_;
onChanged();
}
if (other.hasState()) {
setState(other.getState());
}
if (other.hasStorageType()) {
setStorageType(other.getStorageType());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasStorageUuid()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string storageUuid = 1;
private java.lang.Object storageUuid_ = "";
/**
* required string storageUuid = 1;
*/
public boolean hasStorageUuid() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string storageUuid = 1;
*/
public java.lang.String getStorageUuid() {
java.lang.Object ref = storageUuid_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
storageUuid_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string storageUuid = 1;
*/
public com.google.protobuf.ByteString
getStorageUuidBytes() {
java.lang.Object ref = storageUuid_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
storageUuid_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string storageUuid = 1;
*/
public Builder setStorageUuid(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
storageUuid_ = value;
onChanged();
return this;
}
/**
* required string storageUuid = 1;
*/
public Builder clearStorageUuid() {
bitField0_ = (bitField0_ & ~0x00000001);
storageUuid_ = getDefaultInstance().getStorageUuid();
onChanged();
return this;
}
/**
* required string storageUuid = 1;
*/
public Builder setStorageUuidBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
storageUuid_ = value;
onChanged();
return this;
}
// optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.NORMAL;
/**
* optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];
*/
public boolean hasState() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState getState() {
return state_;
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];
*/
public Builder setState(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
state_ = value;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];
*/
public Builder clearState() {
bitField0_ = (bitField0_ & ~0x00000002);
state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.NORMAL;
onChanged();
return this;
}
// optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];
*/
public boolean hasStorageType() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
return storageType_;
}
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];
*/
public Builder setStorageType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
storageType_ = value;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];
*/
public Builder clearStorageType() {
bitField0_ = (bitField0_ & ~0x00000004);
storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeStorageProto)
}
static {
defaultInstance = new DatanodeStorageProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeStorageProto)
}
/**
 * Read-only accessor view of a {@code hadoop.hdfs.StorageReportProto},
 * implemented by both the immutable message and its Builder.
 * Generated by the protocol buffer compiler.
 */
public interface StorageReportProtoOrBuilder
    extends com.google.protobuf.MessageOrBuilder {
  // required string storageUuid = 1 [deprecated = true];
  /**
   * required string storageUuid = 1 [deprecated = true];
   *
   * @deprecated superseded by the {@code storage} message field (field 7).
   */
  @java.lang.Deprecated boolean hasStorageUuid();
  /**
   * required string storageUuid = 1 [deprecated = true];
   *
   * @deprecated superseded by the {@code storage} message field (field 7).
   */
  @java.lang.Deprecated java.lang.String getStorageUuid();
  /**
   * required string storageUuid = 1 [deprecated = true];
   *
   * @deprecated superseded by the {@code storage} message field (field 7).
   */
  @java.lang.Deprecated com.google.protobuf.ByteString
      getStorageUuidBytes();
  // optional bool failed = 2 [default = false];
  /**
   * optional bool failed = 2 [default = false];
   */
  boolean hasFailed();
  /**
   * optional bool failed = 2 [default = false];
   */
  boolean getFailed();
  // optional uint64 capacity = 3 [default = 0];
  /**
   * optional uint64 capacity = 3 [default = 0];
   */
  boolean hasCapacity();
  /**
   * optional uint64 capacity = 3 [default = 0];
   */
  long getCapacity();
  // optional uint64 dfsUsed = 4 [default = 0];
  /**
   * optional uint64 dfsUsed = 4 [default = 0];
   */
  boolean hasDfsUsed();
  /**
   * optional uint64 dfsUsed = 4 [default = 0];
   */
  long getDfsUsed();
  // optional uint64 remaining = 5 [default = 0];
  /**
   * optional uint64 remaining = 5 [default = 0];
   */
  boolean hasRemaining();
  /**
   * optional uint64 remaining = 5 [default = 0];
   */
  long getRemaining();
  // optional uint64 blockPoolUsed = 6 [default = 0];
  /**
   * optional uint64 blockPoolUsed = 6 [default = 0];
   */
  boolean hasBlockPoolUsed();
  /**
   * optional uint64 blockPoolUsed = 6 [default = 0];
   */
  long getBlockPoolUsed();
  // optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
  /**
   * optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
   *
   *
   * supersedes StorageUuid
   *
   */
  boolean hasStorage();
  /**
   * optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
   *
   *
   * supersedes StorageUuid
   *
   */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage();
  /**
   * optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
   *
   *
   * supersedes StorageUuid
   *
   */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder();
  // optional uint64 nonDfsUsed = 8;
  /**
   * optional uint64 nonDfsUsed = 8;
   */
  boolean hasNonDfsUsed();
  /**
   * optional uint64 nonDfsUsed = 8;
   */
  long getNonDfsUsed();
}
/**
* Protobuf type {@code hadoop.hdfs.StorageReportProto}
*/
public static final class StorageReportProto extends
com.google.protobuf.GeneratedMessage
implements StorageReportProtoOrBuilder {
// Use StorageReportProto.newBuilder() to construct.
// NOTE(review): the wildcard type argument on the builder parameter was
// stripped during HTML extraction; restored to match protoc 2.5 output.
private StorageReportProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
  super(builder);
  this.unknownFields = builder.getUnknownFields();
}
// Used only to create the singleton default instance (empty unknown fields).
private StorageReportProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final StorageReportProto defaultInstance;
public static StorageReportProto getDefaultInstance() {
return defaultInstance;
}
public StorageReportProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private StorageReportProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
storageUuid_ = input.readBytes();
break;
}
case 16: {
bitField0_ |= 0x00000002;
failed_ = input.readBool();
break;
}
case 24: {
bitField0_ |= 0x00000004;
capacity_ = input.readUInt64();
break;
}
case 32: {
bitField0_ |= 0x00000008;
dfsUsed_ = input.readUInt64();
break;
}
case 40: {
bitField0_ |= 0x00000010;
remaining_ = input.readUInt64();
break;
}
case 48: {
bitField0_ |= 0x00000020;
blockPoolUsed_ = input.readUInt64();
break;
}
case 58: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000040) == 0x00000040)) {
subBuilder = storage_.toBuilder();
}
storage_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(storage_);
storage_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000040;
break;
}
case 64: {
bitField0_ |= 0x00000080;
nonDfsUsed_ = input.readUInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder.class);
}
public static com.google.protobuf.Parser PARSER =
new com.google.protobuf.AbstractParser() {
public StorageReportProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new StorageReportProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser getParserForType() {
return PARSER;
}
private int bitField0_;
// required string storageUuid = 1 [deprecated = true];
public static final int STORAGEUUID_FIELD_NUMBER = 1;
private java.lang.Object storageUuid_;
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated public boolean hasStorageUuid() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated public java.lang.String getStorageUuid() {
java.lang.Object ref = storageUuid_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
storageUuid_ = s;
}
return s;
}
}
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated public com.google.protobuf.ByteString
getStorageUuidBytes() {
java.lang.Object ref = storageUuid_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
storageUuid_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// optional bool failed = 2 [default = false];
public static final int FAILED_FIELD_NUMBER = 2;
private boolean failed_;
/**
* optional bool failed = 2 [default = false];
*/
public boolean hasFailed() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional bool failed = 2 [default = false];
*/
public boolean getFailed() {
return failed_;
}
// optional uint64 capacity = 3 [default = 0];
public static final int CAPACITY_FIELD_NUMBER = 3;
private long capacity_;
/**
* optional uint64 capacity = 3 [default = 0];
*/
public boolean hasCapacity() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional uint64 capacity = 3 [default = 0];
*/
public long getCapacity() {
return capacity_;
}
// optional uint64 dfsUsed = 4 [default = 0];
public static final int DFSUSED_FIELD_NUMBER = 4;
private long dfsUsed_;
/**
* optional uint64 dfsUsed = 4 [default = 0];
*/
public boolean hasDfsUsed() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional uint64 dfsUsed = 4 [default = 0];
*/
public long getDfsUsed() {
return dfsUsed_;
}
// optional uint64 remaining = 5 [default = 0];
public static final int REMAINING_FIELD_NUMBER = 5;
private long remaining_;
/**
* optional uint64 remaining = 5 [default = 0];
*/
public boolean hasRemaining() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional uint64 remaining = 5 [default = 0];
*/
public long getRemaining() {
return remaining_;
}
// optional uint64 blockPoolUsed = 6 [default = 0];
public static final int BLOCKPOOLUSED_FIELD_NUMBER = 6;
private long blockPoolUsed_;
/**
* optional uint64 blockPoolUsed = 6 [default = 0];
*/
public boolean hasBlockPoolUsed() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional uint64 blockPoolUsed = 6 [default = 0];
*/
public long getBlockPoolUsed() {
return blockPoolUsed_;
}
// optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
public static final int STORAGE_FIELD_NUMBER = 7;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto storage_;
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
*
*
* supersedes StorageUuid
*
*/
public boolean hasStorage() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
*
*
* supersedes StorageUuid
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage() {
return storage_;
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
*
*
* supersedes StorageUuid
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder() {
return storage_;
}
// optional uint64 nonDfsUsed = 8;
public static final int NONDFSUSED_FIELD_NUMBER = 8;
private long nonDfsUsed_;
/**
* optional uint64 nonDfsUsed = 8;
*/
public boolean hasNonDfsUsed() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* optional uint64 nonDfsUsed = 8;
*/
public long getNonDfsUsed() {
return nonDfsUsed_;
}
private void initFields() {
storageUuid_ = "";
failed_ = false;
capacity_ = 0L;
dfsUsed_ = 0L;
remaining_ = 0L;
blockPoolUsed_ = 0L;
storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance();
nonDfsUsed_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasStorageUuid()) {
memoizedIsInitialized = 0;
return false;
}
if (hasStorage()) {
if (!getStorage().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getStorageUuidBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBool(2, failed_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt64(3, capacity_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt64(4, dfsUsed_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeUInt64(5, remaining_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeUInt64(6, blockPoolUsed_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeMessage(7, storage_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
output.writeUInt64(8, nonDfsUsed_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getStorageUuidBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(2, failed_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, capacity_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(4, dfsUsed_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(5, remaining_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(6, blockPoolUsed_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(7, storage_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(8, nonDfsUsed_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto) obj;
boolean result = true;
result = result && (hasStorageUuid() == other.hasStorageUuid());
if (hasStorageUuid()) {
result = result && getStorageUuid()
.equals(other.getStorageUuid());
}
result = result && (hasFailed() == other.hasFailed());
if (hasFailed()) {
result = result && (getFailed()
== other.getFailed());
}
result = result && (hasCapacity() == other.hasCapacity());
if (hasCapacity()) {
result = result && (getCapacity()
== other.getCapacity());
}
result = result && (hasDfsUsed() == other.hasDfsUsed());
if (hasDfsUsed()) {
result = result && (getDfsUsed()
== other.getDfsUsed());
}
result = result && (hasRemaining() == other.hasRemaining());
if (hasRemaining()) {
result = result && (getRemaining()
== other.getRemaining());
}
result = result && (hasBlockPoolUsed() == other.hasBlockPoolUsed());
if (hasBlockPoolUsed()) {
result = result && (getBlockPoolUsed()
== other.getBlockPoolUsed());
}
result = result && (hasStorage() == other.hasStorage());
if (hasStorage()) {
result = result && getStorage()
.equals(other.getStorage());
}
result = result && (hasNonDfsUsed() == other.hasNonDfsUsed());
if (hasNonDfsUsed()) {
result = result && (getNonDfsUsed()
== other.getNonDfsUsed());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasStorageUuid()) {
hash = (37 * hash) + STORAGEUUID_FIELD_NUMBER;
hash = (53 * hash) + getStorageUuid().hashCode();
}
if (hasFailed()) {
hash = (37 * hash) + FAILED_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getFailed());
}
if (hasCapacity()) {
hash = (37 * hash) + CAPACITY_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getCapacity());
}
if (hasDfsUsed()) {
hash = (37 * hash) + DFSUSED_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getDfsUsed());
}
if (hasRemaining()) {
hash = (37 * hash) + REMAINING_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getRemaining());
}
if (hasBlockPoolUsed()) {
hash = (37 * hash) + BLOCKPOOLUSED_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getBlockPoolUsed());
}
if (hasStorage()) {
hash = (37 * hash) + STORAGE_FIELD_NUMBER;
hash = (53 * hash) + getStorage().hashCode();
}
if (hasNonDfsUsed()) {
hash = (37 * hash) + NONDFSUSED_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getNonDfsUsed());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.StorageReportProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getStorageFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
storageUuid_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
failed_ = false;
bitField0_ = (bitField0_ & ~0x00000002);
capacity_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
dfsUsed_ = 0L;
bitField0_ = (bitField0_ & ~0x00000008);
remaining_ = 0L;
bitField0_ = (bitField0_ & ~0x00000010);
blockPoolUsed_ = 0L;
bitField0_ = (bitField0_ & ~0x00000020);
if (storageBuilder_ == null) {
storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance();
} else {
storageBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000040);
nonDfsUsed_ = 0L;
bitField0_ = (bitField0_ & ~0x00000080);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.storageUuid_ = storageUuid_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.failed_ = failed_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.capacity_ = capacity_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.dfsUsed_ = dfsUsed_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.remaining_ = remaining_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000020;
}
result.blockPoolUsed_ = blockPoolUsed_;
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000040;
}
if (storageBuilder_ == null) {
result.storage_ = storage_;
} else {
result.storage_ = storageBuilder_.build();
}
if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
to_bitField0_ |= 0x00000080;
}
result.nonDfsUsed_ = nonDfsUsed_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.getDefaultInstance()) return this;
if (other.hasStorageUuid()) {
bitField0_ |= 0x00000001;
storageUuid_ = other.storageUuid_;
onChanged();
}
if (other.hasFailed()) {
setFailed(other.getFailed());
}
if (other.hasCapacity()) {
setCapacity(other.getCapacity());
}
if (other.hasDfsUsed()) {
setDfsUsed(other.getDfsUsed());
}
if (other.hasRemaining()) {
setRemaining(other.getRemaining());
}
if (other.hasBlockPoolUsed()) {
setBlockPoolUsed(other.getBlockPoolUsed());
}
if (other.hasStorage()) {
mergeStorage(other.getStorage());
}
if (other.hasNonDfsUsed()) {
setNonDfsUsed(other.getNonDfsUsed());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasStorageUuid()) {
return false;
}
if (hasStorage()) {
if (!getStorage().isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string storageUuid = 1 [deprecated = true];
private java.lang.Object storageUuid_ = "";
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated public boolean hasStorageUuid() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated public java.lang.String getStorageUuid() {
java.lang.Object ref = storageUuid_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
storageUuid_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated public com.google.protobuf.ByteString
getStorageUuidBytes() {
java.lang.Object ref = storageUuid_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
storageUuid_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated public Builder setStorageUuid(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
storageUuid_ = value;
onChanged();
return this;
}
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated public Builder clearStorageUuid() {
bitField0_ = (bitField0_ & ~0x00000001);
storageUuid_ = getDefaultInstance().getStorageUuid();
onChanged();
return this;
}
/**
* required string storageUuid = 1 [deprecated = true];
*/
@java.lang.Deprecated public Builder setStorageUuidBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
storageUuid_ = value;
onChanged();
return this;
}
// optional bool failed = 2 [default = false];
private boolean failed_ ;
/**
* optional bool failed = 2 [default = false];
*/
public boolean hasFailed() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional bool failed = 2 [default = false];
*/
public boolean getFailed() {
return failed_;
}
/**
* optional bool failed = 2 [default = false];
*/
public Builder setFailed(boolean value) {
bitField0_ |= 0x00000002;
failed_ = value;
onChanged();
return this;
}
/**
* optional bool failed = 2 [default = false];
*/
public Builder clearFailed() {
bitField0_ = (bitField0_ & ~0x00000002);
failed_ = false;
onChanged();
return this;
}
// optional uint64 capacity = 3 [default = 0];
private long capacity_ ;
/**
* optional uint64 capacity = 3 [default = 0];
*/
public boolean hasCapacity() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional uint64 capacity = 3 [default = 0];
*/
public long getCapacity() {
return capacity_;
}
/**
* optional uint64 capacity = 3 [default = 0];
*/
public Builder setCapacity(long value) {
bitField0_ |= 0x00000004;
capacity_ = value;
onChanged();
return this;
}
/**
* optional uint64 capacity = 3 [default = 0];
*/
public Builder clearCapacity() {
bitField0_ = (bitField0_ & ~0x00000004);
capacity_ = 0L;
onChanged();
return this;
}
// optional uint64 dfsUsed = 4 [default = 0];
private long dfsUsed_ ;
/**
* optional uint64 dfsUsed = 4 [default = 0];
*/
public boolean hasDfsUsed() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional uint64 dfsUsed = 4 [default = 0];
*/
public long getDfsUsed() {
return dfsUsed_;
}
/**
* optional uint64 dfsUsed = 4 [default = 0];
*/
public Builder setDfsUsed(long value) {
bitField0_ |= 0x00000008;
dfsUsed_ = value;
onChanged();
return this;
}
/**
* optional uint64 dfsUsed = 4 [default = 0];
*/
public Builder clearDfsUsed() {
bitField0_ = (bitField0_ & ~0x00000008);
dfsUsed_ = 0L;
onChanged();
return this;
}
// optional uint64 remaining = 5 [default = 0];
private long remaining_ ;
/**
* optional uint64 remaining = 5 [default = 0];
*/
public boolean hasRemaining() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional uint64 remaining = 5 [default = 0];
*/
public long getRemaining() {
return remaining_;
}
/**
* optional uint64 remaining = 5 [default = 0];
*/
public Builder setRemaining(long value) {
bitField0_ |= 0x00000010;
remaining_ = value;
onChanged();
return this;
}
/**
* optional uint64 remaining = 5 [default = 0];
*/
public Builder clearRemaining() {
bitField0_ = (bitField0_ & ~0x00000010);
remaining_ = 0L;
onChanged();
return this;
}
// optional uint64 blockPoolUsed = 6 [default = 0];
private long blockPoolUsed_ ;
/**
* optional uint64 blockPoolUsed = 6 [default = 0];
*/
public boolean hasBlockPoolUsed() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional uint64 blockPoolUsed = 6 [default = 0];
*/
public long getBlockPoolUsed() {
return blockPoolUsed_;
}
/**
* optional uint64 blockPoolUsed = 6 [default = 0];
*/
public Builder setBlockPoolUsed(long value) {
bitField0_ |= 0x00000020;
blockPoolUsed_ = value;
onChanged();
return this;
}
/**
* optional uint64 blockPoolUsed = 6 [default = 0];
*/
public Builder clearBlockPoolUsed() {
bitField0_ = (bitField0_ & ~0x00000020);
blockPoolUsed_ = 0L;
onChanged();
return this;
}
// optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder> storageBuilder_;
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
*
*
* supersedes StorageUuid
*
*/
public boolean hasStorage() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
*
*
* supersedes StorageUuid
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage() {
if (storageBuilder_ == null) {
return storage_;
} else {
return storageBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
*
*
* supersedes StorageUuid
*
*/
public Builder setStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto value) {
if (storageBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
storage_ = value;
onChanged();
} else {
storageBuilder_.setMessage(value);
}
bitField0_ |= 0x00000040;
return this;
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
*
*
* supersedes StorageUuid
*
*/
public Builder setStorage(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder builderForValue) {
if (storageBuilder_ == null) {
storage_ = builderForValue.build();
onChanged();
} else {
storageBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000040;
return this;
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
*
*
* supersedes StorageUuid
*
*/
public Builder mergeStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto value) {
if (storageBuilder_ == null) {
if (((bitField0_ & 0x00000040) == 0x00000040) &&
storage_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance()) {
storage_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.newBuilder(storage_).mergeFrom(value).buildPartial();
} else {
storage_ = value;
}
onChanged();
} else {
storageBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000040;
return this;
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
*
*
* supersedes StorageUuid
*
*/
public Builder clearStorage() {
if (storageBuilder_ == null) {
storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance();
onChanged();
} else {
storageBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000040);
return this;
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
*
*
* supersedes StorageUuid
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder getStorageBuilder() {
bitField0_ |= 0x00000040;
onChanged();
return getStorageFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
*
*
* supersedes StorageUuid
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder() {
if (storageBuilder_ != null) {
return storageBuilder_.getMessageOrBuilder();
} else {
return storage_;
}
}
/**
* optional .hadoop.hdfs.DatanodeStorageProto storage = 7;
*
*
* supersedes StorageUuid
*
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder>
getStorageFieldBuilder() {
if (storageBuilder_ == null) {
storageBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder>(
storage_,
getParentForChildren(),
isClean());
storage_ = null;
}
return storageBuilder_;
}
// optional uint64 nonDfsUsed = 8;
// Backing store for field 8; presence is tracked by bit 0x80 of bitField0_.
private long nonDfsUsed_ ;
/**
* optional uint64 nonDfsUsed = 8;
*/
public boolean hasNonDfsUsed() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* optional uint64 nonDfsUsed = 8;
*/
public long getNonDfsUsed() {
return nonDfsUsed_;
}
/**
* optional uint64 nonDfsUsed = 8;
*/
// Sets field 8 and marks it present; onChanged() notifies any parent builder.
public Builder setNonDfsUsed(long value) {
bitField0_ |= 0x00000080;
nonDfsUsed_ = value;
onChanged();
return this;
}
/**
* optional uint64 nonDfsUsed = 8;
*/
// Clears the has-bit and restores the proto default (0).
public Builder clearNonDfsUsed() {
bitField0_ = (bitField0_ & ~0x00000080);
nonDfsUsed_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageReportProto)
}
static {
// Eagerly build the singleton default instance with default field values.
defaultInstance = new StorageReportProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageReportProto)
}
// Read-only accessor contract shared by ContentSummaryProto and its Builder:
// one hasX()/getX() pair per proto field (fields 1-6 required, 7-12 optional).
public interface ContentSummaryProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required uint64 length = 1;
/**
* required uint64 length = 1;
*/
boolean hasLength();
/**
* required uint64 length = 1;
*/
long getLength();
// required uint64 fileCount = 2;
/**
* required uint64 fileCount = 2;
*/
boolean hasFileCount();
/**
* required uint64 fileCount = 2;
*/
long getFileCount();
// required uint64 directoryCount = 3;
/**
* required uint64 directoryCount = 3;
*/
boolean hasDirectoryCount();
/**
* required uint64 directoryCount = 3;
*/
long getDirectoryCount();
// required uint64 quota = 4;
/**
* required uint64 quota = 4;
*/
boolean hasQuota();
/**
* required uint64 quota = 4;
*/
long getQuota();
// required uint64 spaceConsumed = 5;
/**
* required uint64 spaceConsumed = 5;
*/
boolean hasSpaceConsumed();
/**
* required uint64 spaceConsumed = 5;
*/
long getSpaceConsumed();
// required uint64 spaceQuota = 6;
/**
* required uint64 spaceQuota = 6;
*/
boolean hasSpaceQuota();
/**
* required uint64 spaceQuota = 6;
*/
long getSpaceQuota();
// optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
boolean hasTypeQuotaInfos();
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos();
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder();
// optional uint64 snapshotLength = 8;
/**
* optional uint64 snapshotLength = 8;
*/
boolean hasSnapshotLength();
/**
* optional uint64 snapshotLength = 8;
*/
long getSnapshotLength();
// optional uint64 snapshotFileCount = 9;
/**
* optional uint64 snapshotFileCount = 9;
*/
boolean hasSnapshotFileCount();
/**
* optional uint64 snapshotFileCount = 9;
*/
long getSnapshotFileCount();
// optional uint64 snapshotDirectoryCount = 10;
/**
* optional uint64 snapshotDirectoryCount = 10;
*/
boolean hasSnapshotDirectoryCount();
/**
* optional uint64 snapshotDirectoryCount = 10;
*/
long getSnapshotDirectoryCount();
// optional uint64 snapshotSpaceConsumed = 11;
/**
* optional uint64 snapshotSpaceConsumed = 11;
*/
boolean hasSnapshotSpaceConsumed();
/**
* optional uint64 snapshotSpaceConsumed = 11;
*/
long getSnapshotSpaceConsumed();
// optional string erasureCodingPolicy = 12;
/**
* optional string erasureCodingPolicy = 12;
*/
boolean hasErasureCodingPolicy();
/**
* optional string erasureCodingPolicy = 12;
*/
java.lang.String getErasureCodingPolicy();
/**
* optional string erasureCodingPolicy = 12;
*/
com.google.protobuf.ByteString
getErasureCodingPolicyBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.ContentSummaryProto}
*
*
**
* Summary of a file or directory
*
*/
public static final class ContentSummaryProto extends
com.google.protobuf.GeneratedMessage
implements ContentSummaryProtoOrBuilder {
// Use ContentSummaryProto.newBuilder() to construct.
// NOTE(review): the builder parameter's generic argument was lost in this copy
// ("Builder>" is a syntax error); restored to the wildcard form protoc 2.5 emits.
private ContentSummaryProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
// Constructor for the singleton default instance: fields keep the defaults
// assigned by initFields() and the unknown-field set is empty.
private ContentSummaryProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ContentSummaryProto defaultInstance;
public static ContentSummaryProto getDefaultInstance() {
return defaultInstance;
}
public ContentSummaryProto getDefaultInstanceForType() {
return defaultInstance;
}
// Unknown fields captured during parsing; serialized back out verbatim.
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
// Wire-format parsing constructor: consumes tagged fields from the stream
// until tag 0 (end of message), setting the matching has-bit for each field
// read. Unrecognized tags are preserved in unknownFields. Note the switch is
// generated with the default label before the cases; each arm ends in break,
// so ordering has no effect on behavior.
private ContentSummaryProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
length_ = input.readUInt64();
break;
}
case 16: {
bitField0_ |= 0x00000002;
fileCount_ = input.readUInt64();
break;
}
case 24: {
bitField0_ |= 0x00000004;
directoryCount_ = input.readUInt64();
break;
}
case 32: {
bitField0_ |= 0x00000008;
quota_ = input.readUInt64();
break;
}
case 40: {
bitField0_ |= 0x00000010;
spaceConsumed_ = input.readUInt64();
break;
}
case 48: {
bitField0_ |= 0x00000020;
spaceQuota_ = input.readUInt64();
break;
}
case 58: {
// Message field 7: if it was already seen, merge the new payload
// into the previous value rather than replacing it (proto2 rule
// for repeated occurrences of a singular message field).
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000040) == 0x00000040)) {
subBuilder = typeQuotaInfos_.toBuilder();
}
typeQuotaInfos_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(typeQuotaInfos_);
typeQuotaInfos_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000040;
break;
}
case 64: {
bitField0_ |= 0x00000080;
snapshotLength_ = input.readUInt64();
break;
}
case 72: {
bitField0_ |= 0x00000100;
snapshotFileCount_ = input.readUInt64();
break;
}
case 80: {
bitField0_ |= 0x00000200;
snapshotDirectoryCount_ = input.readUInt64();
break;
}
case 88: {
bitField0_ |= 0x00000400;
snapshotSpaceConsumed_ = input.readUInt64();
break;
}
case 98: {
// String field 12 is stored as raw bytes; UTF-8 decoding is
// deferred to getErasureCodingPolicy().
bitField0_ |= 0x00000800;
erasureCodingPolicy_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
// Even on failure, freeze whatever was parsed so the partially built
// message attached to the exception is immutable.
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.Builder.class);
}
// NOTE(review): the generic arguments on Parser/AbstractParser were lost in
// this copy (raw types); restored to the parameterized form protoc 2.5 emits.
public static com.google.protobuf.Parser<ContentSummaryProto> PARSER =
new com.google.protobuf.AbstractParser<ContentSummaryProto>() {
public ContentSummaryProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ContentSummaryProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<ContentSummaryProto> getParserForType() {
return PARSER;
}
// Per-field storage and accessors. hasX() tests the field's bit in
// bitField0_; numeric getters simply return the backing field.
private int bitField0_;
// required uint64 length = 1;
public static final int LENGTH_FIELD_NUMBER = 1;
private long length_;
/**
* required uint64 length = 1;
*/
public boolean hasLength() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint64 length = 1;
*/
public long getLength() {
return length_;
}
// required uint64 fileCount = 2;
public static final int FILECOUNT_FIELD_NUMBER = 2;
private long fileCount_;
/**
* required uint64 fileCount = 2;
*/
public boolean hasFileCount() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 fileCount = 2;
*/
public long getFileCount() {
return fileCount_;
}
// required uint64 directoryCount = 3;
public static final int DIRECTORYCOUNT_FIELD_NUMBER = 3;
private long directoryCount_;
/**
* required uint64 directoryCount = 3;
*/
public boolean hasDirectoryCount() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 directoryCount = 3;
*/
public long getDirectoryCount() {
return directoryCount_;
}
// required uint64 quota = 4;
public static final int QUOTA_FIELD_NUMBER = 4;
private long quota_;
/**
* required uint64 quota = 4;
*/
public boolean hasQuota() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required uint64 quota = 4;
*/
public long getQuota() {
return quota_;
}
// required uint64 spaceConsumed = 5;
public static final int SPACECONSUMED_FIELD_NUMBER = 5;
private long spaceConsumed_;
/**
* required uint64 spaceConsumed = 5;
*/
public boolean hasSpaceConsumed() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required uint64 spaceConsumed = 5;
*/
public long getSpaceConsumed() {
return spaceConsumed_;
}
// required uint64 spaceQuota = 6;
public static final int SPACEQUOTA_FIELD_NUMBER = 6;
private long spaceQuota_;
/**
* required uint64 spaceQuota = 6;
*/
public boolean hasSpaceQuota() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* required uint64 spaceQuota = 6;
*/
public long getSpaceQuota() {
return spaceQuota_;
}
// optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
public static final int TYPEQUOTAINFOS_FIELD_NUMBER = 7;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto typeQuotaInfos_;
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
public boolean hasTypeQuotaInfos() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos() {
return typeQuotaInfos_;
}
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder() {
return typeQuotaInfos_;
}
// optional uint64 snapshotLength = 8;
public static final int SNAPSHOTLENGTH_FIELD_NUMBER = 8;
private long snapshotLength_;
/**
* optional uint64 snapshotLength = 8;
*/
public boolean hasSnapshotLength() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* optional uint64 snapshotLength = 8;
*/
public long getSnapshotLength() {
return snapshotLength_;
}
// optional uint64 snapshotFileCount = 9;
public static final int SNAPSHOTFILECOUNT_FIELD_NUMBER = 9;
private long snapshotFileCount_;
/**
* optional uint64 snapshotFileCount = 9;
*/
public boolean hasSnapshotFileCount() {
return ((bitField0_ & 0x00000100) == 0x00000100);
}
/**
* optional uint64 snapshotFileCount = 9;
*/
public long getSnapshotFileCount() {
return snapshotFileCount_;
}
// optional uint64 snapshotDirectoryCount = 10;
public static final int SNAPSHOTDIRECTORYCOUNT_FIELD_NUMBER = 10;
private long snapshotDirectoryCount_;
/**
* optional uint64 snapshotDirectoryCount = 10;
*/
public boolean hasSnapshotDirectoryCount() {
return ((bitField0_ & 0x00000200) == 0x00000200);
}
/**
* optional uint64 snapshotDirectoryCount = 10;
*/
public long getSnapshotDirectoryCount() {
return snapshotDirectoryCount_;
}
// optional uint64 snapshotSpaceConsumed = 11;
public static final int SNAPSHOTSPACECONSUMED_FIELD_NUMBER = 11;
private long snapshotSpaceConsumed_;
/**
* optional uint64 snapshotSpaceConsumed = 11;
*/
public boolean hasSnapshotSpaceConsumed() {
return ((bitField0_ & 0x00000400) == 0x00000400);
}
/**
* optional uint64 snapshotSpaceConsumed = 11;
*/
public long getSnapshotSpaceConsumed() {
return snapshotSpaceConsumed_;
}
// optional string erasureCodingPolicy = 12;
public static final int ERASURECODINGPOLICY_FIELD_NUMBER = 12;
// Holds either a String or a ByteString; parsing stores raw bytes and the
// getters below convert (and cache) lazily in each direction.
private java.lang.Object erasureCodingPolicy_;
/**
* optional string erasureCodingPolicy = 12;
*/
public boolean hasErasureCodingPolicy() {
return ((bitField0_ & 0x00000800) == 0x00000800);
}
/**
* optional string erasureCodingPolicy = 12;
*/
// Decodes the cached ByteString as UTF-8; the decoded String is cached back
// only when the bytes are valid UTF-8.
public java.lang.String getErasureCodingPolicy() {
java.lang.Object ref = erasureCodingPolicy_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
erasureCodingPolicy_ = s;
}
return s;
}
}
/**
* optional string erasureCodingPolicy = 12;
*/
// Inverse of the above: encodes a cached String to UTF-8 bytes and caches
// the ByteString form.
public com.google.protobuf.ByteString
getErasureCodingPolicyBytes() {
java.lang.Object ref = erasureCodingPolicy_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
erasureCodingPolicy_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// Assigns every field its proto default value (0 / default instance / "").
private void initFields() {
length_ = 0L;
fileCount_ = 0L;
directoryCount_ = 0L;
quota_ = 0L;
spaceConsumed_ = 0L;
spaceQuota_ = 0L;
typeQuotaInfos_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance();
snapshotLength_ = 0L;
snapshotFileCount_ = 0L;
snapshotDirectoryCount_ = 0L;
snapshotSpaceConsumed_ = 0L;
erasureCodingPolicy_ = "";
}
// Memoized result: -1 = not computed, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
// True when all six required fields (1-6) are present and the optional
// nested typeQuotaInfos message, if set, is itself initialized.
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasLength()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasFileCount()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasDirectoryCount()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasQuota()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasSpaceConsumed()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasSpaceQuota()) {
memoizedIsInitialized = 0;
return false;
}
if (hasTypeQuotaInfos()) {
if (!getTypeQuotaInfos().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
// Serializes only fields whose has-bit is set, in field-number order,
// followed by any unknown fields. getSerializedSize() is called first for
// its side effect of memoizing nested message sizes.
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt64(1, length_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, fileCount_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt64(3, directoryCount_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt64(4, quota_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeUInt64(5, spaceConsumed_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeUInt64(6, spaceQuota_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeMessage(7, typeQuotaInfos_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
output.writeUInt64(8, snapshotLength_);
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
output.writeUInt64(9, snapshotFileCount_);
}
if (((bitField0_ & 0x00000200) == 0x00000200)) {
output.writeUInt64(10, snapshotDirectoryCount_);
}
if (((bitField0_ & 0x00000400) == 0x00000400)) {
output.writeUInt64(11, snapshotSpaceConsumed_);
}
if (((bitField0_ & 0x00000800) == 0x00000800)) {
output.writeBytes(12, getErasureCodingPolicyBytes());
}
getUnknownFields().writeTo(output);
}
// Memoized wire size; -1 until first computed.
private int memoizedSerializedSize = -1;
// Sums the encoded size of each present field plus unknown fields; the
// result is cached because the message is immutable after construction.
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(1, length_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, fileCount_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, directoryCount_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(4, quota_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(5, spaceConsumed_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(6, spaceQuota_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(7, typeQuotaInfos_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(8, snapshotLength_);
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(9, snapshotFileCount_);
}
if (((bitField0_ & 0x00000200) == 0x00000200)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(10, snapshotDirectoryCount_);
}
if (((bitField0_ & 0x00000400) == 0x00000400)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(11, snapshotSpaceConsumed_);
}
if (((bitField0_ & 0x00000800) == 0x00000800)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(12, getErasureCodingPolicyBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
// Java serialization is delegated to GeneratedMessage's replacement object.
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
// Field-by-field equality: two messages are equal when each field's
// presence matches and, where present, the values match; unknown fields
// must also be equal.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto) obj;
boolean result = true;
result = result && (hasLength() == other.hasLength());
if (hasLength()) {
result = result && (getLength()
== other.getLength());
}
result = result && (hasFileCount() == other.hasFileCount());
if (hasFileCount()) {
result = result && (getFileCount()
== other.getFileCount());
}
result = result && (hasDirectoryCount() == other.hasDirectoryCount());
if (hasDirectoryCount()) {
result = result && (getDirectoryCount()
== other.getDirectoryCount());
}
result = result && (hasQuota() == other.hasQuota());
if (hasQuota()) {
result = result && (getQuota()
== other.getQuota());
}
result = result && (hasSpaceConsumed() == other.hasSpaceConsumed());
if (hasSpaceConsumed()) {
result = result && (getSpaceConsumed()
== other.getSpaceConsumed());
}
result = result && (hasSpaceQuota() == other.hasSpaceQuota());
if (hasSpaceQuota()) {
result = result && (getSpaceQuota()
== other.getSpaceQuota());
}
result = result && (hasTypeQuotaInfos() == other.hasTypeQuotaInfos());
if (hasTypeQuotaInfos()) {
result = result && getTypeQuotaInfos()
.equals(other.getTypeQuotaInfos());
}
result = result && (hasSnapshotLength() == other.hasSnapshotLength());
if (hasSnapshotLength()) {
result = result && (getSnapshotLength()
== other.getSnapshotLength());
}
result = result && (hasSnapshotFileCount() == other.hasSnapshotFileCount());
if (hasSnapshotFileCount()) {
result = result && (getSnapshotFileCount()
== other.getSnapshotFileCount());
}
result = result && (hasSnapshotDirectoryCount() == other.hasSnapshotDirectoryCount());
if (hasSnapshotDirectoryCount()) {
result = result && (getSnapshotDirectoryCount()
== other.getSnapshotDirectoryCount());
}
result = result && (hasSnapshotSpaceConsumed() == other.hasSnapshotSpaceConsumed());
if (hasSnapshotSpaceConsumed()) {
result = result && (getSnapshotSpaceConsumed()
== other.getSnapshotSpaceConsumed());
}
result = result && (hasErasureCodingPolicy() == other.hasErasureCodingPolicy());
if (hasErasureCodingPolicy()) {
result = result && getErasureCodingPolicy()
.equals(other.getErasureCodingPolicy());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
// Memoized hash; 0 means "not yet computed".
private int memoizedHashCode = 0;
// Hash mixes the descriptor, then (field number, value hash) for each
// present field, then unknown fields — consistent with equals() above.
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasLength()) {
hash = (37 * hash) + LENGTH_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getLength());
}
if (hasFileCount()) {
hash = (37 * hash) + FILECOUNT_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getFileCount());
}
if (hasDirectoryCount()) {
hash = (37 * hash) + DIRECTORYCOUNT_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getDirectoryCount());
}
if (hasQuota()) {
hash = (37 * hash) + QUOTA_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getQuota());
}
if (hasSpaceConsumed()) {
hash = (37 * hash) + SPACECONSUMED_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getSpaceConsumed());
}
if (hasSpaceQuota()) {
hash = (37 * hash) + SPACEQUOTA_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getSpaceQuota());
}
if (hasTypeQuotaInfos()) {
hash = (37 * hash) + TYPEQUOTAINFOS_FIELD_NUMBER;
hash = (53 * hash) + getTypeQuotaInfos().hashCode();
}
if (hasSnapshotLength()) {
hash = (37 * hash) + SNAPSHOTLENGTH_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getSnapshotLength());
}
if (hasSnapshotFileCount()) {
hash = (37 * hash) + SNAPSHOTFILECOUNT_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getSnapshotFileCount());
}
if (hasSnapshotDirectoryCount()) {
hash = (37 * hash) + SNAPSHOTDIRECTORYCOUNT_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getSnapshotDirectoryCount());
}
if (hasSnapshotSpaceConsumed()) {
hash = (37 * hash) + SNAPSHOTSPACECONSUMED_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getSnapshotSpaceConsumed());
}
if (hasErasureCodingPolicy()) {
hash = (37 * hash) + ERASURECODINGPOLICY_FIELD_NUMBER;
hash = (53 * hash) + getErasureCodingPolicy().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// Static parse entry points: every overload delegates to PARSER, varying
// only the input source (ByteString, byte[], InputStream, CodedInputStream)
// and whether an extension registry or length-delimited framing is used.
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Builder factories: newBuilder(prototype) seeds a fresh builder with an
// existing message's fields; toBuilder() is the instance-method form.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ContentSummaryProto}
*
*
**
* Summary of a file or directory
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
// When the runtime forces eager field builders, pre-create the nested
// builder for the typeQuotaInfos message field.
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getTypeQuotaInfosFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
// Resets every field to its proto default and clears all twelve has-bits.
public Builder clear() {
super.clear();
length_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
fileCount_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
directoryCount_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
quota_ = 0L;
bitField0_ = (bitField0_ & ~0x00000008);
spaceConsumed_ = 0L;
bitField0_ = (bitField0_ & ~0x00000010);
spaceQuota_ = 0L;
bitField0_ = (bitField0_ & ~0x00000020);
if (typeQuotaInfosBuilder_ == null) {
typeQuotaInfos_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance();
} else {
typeQuotaInfosBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000040);
snapshotLength_ = 0L;
bitField0_ = (bitField0_ & ~0x00000080);
snapshotFileCount_ = 0L;
bitField0_ = (bitField0_ & ~0x00000100);
snapshotDirectoryCount_ = 0L;
bitField0_ = (bitField0_ & ~0x00000200);
snapshotSpaceConsumed_ = 0L;
bitField0_ = (bitField0_ & ~0x00000400);
erasureCodingPolicy_ = "";
bitField0_ = (bitField0_ & ~0x00000800);
return this;
}
// Deep copy via round-tripping through a partially built message.
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDefaultInstance();
}
// Like buildPartial() but rejects messages missing required fields.
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
// Copies every field value and has-bit from the builder into a new message;
// field values are copied unconditionally, but only has-bits set in the
// builder are transferred, so unset fields read as defaults.
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.length_ = length_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.fileCount_ = fileCount_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.directoryCount_ = directoryCount_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.quota_ = quota_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.spaceConsumed_ = spaceConsumed_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000020;
}
result.spaceQuota_ = spaceQuota_;
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000040;
}
// Message field 7 comes from the nested builder when one is active.
if (typeQuotaInfosBuilder_ == null) {
result.typeQuotaInfos_ = typeQuotaInfos_;
} else {
result.typeQuotaInfos_ = typeQuotaInfosBuilder_.build();
}
if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
to_bitField0_ |= 0x00000080;
}
result.snapshotLength_ = snapshotLength_;
if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
to_bitField0_ |= 0x00000100;
}
result.snapshotFileCount_ = snapshotFileCount_;
if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
to_bitField0_ |= 0x00000200;
}
result.snapshotDirectoryCount_ = snapshotDirectoryCount_;
if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
to_bitField0_ |= 0x00000400;
}
result.snapshotSpaceConsumed_ = snapshotSpaceConsumed_;
if (((from_bitField0_ & 0x00000800) == 0x00000800)) {
to_bitField0_ |= 0x00000800;
}
result.erasureCodingPolicy_ = erasureCodingPolicy_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
// Generic Message merge: dispatches to the typed overload when possible,
// otherwise falls back to reflection-based merging in the superclass.
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
// Typed merge: copies each field that is present in `other`, overwriting
// scalars and recursively merging the nested typeQuotaInfos message.
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDefaultInstance()) return this;
if (other.hasLength()) {
setLength(other.getLength());
}
if (other.hasFileCount()) {
setFileCount(other.getFileCount());
}
if (other.hasDirectoryCount()) {
setDirectoryCount(other.getDirectoryCount());
}
if (other.hasQuota()) {
setQuota(other.getQuota());
}
if (other.hasSpaceConsumed()) {
setSpaceConsumed(other.getSpaceConsumed());
}
if (other.hasSpaceQuota()) {
setSpaceQuota(other.getSpaceQuota());
}
if (other.hasTypeQuotaInfos()) {
mergeTypeQuotaInfos(other.getTypeQuotaInfos());
}
if (other.hasSnapshotLength()) {
setSnapshotLength(other.getSnapshotLength());
}
if (other.hasSnapshotFileCount()) {
setSnapshotFileCount(other.getSnapshotFileCount());
}
if (other.hasSnapshotDirectoryCount()) {
setSnapshotDirectoryCount(other.getSnapshotDirectoryCount());
}
if (other.hasSnapshotSpaceConsumed()) {
setSnapshotSpaceConsumed(other.getSnapshotSpaceConsumed());
}
if (other.hasErasureCodingPolicy()) {
// Shares the other message's String/ByteString reference directly to
// avoid forcing a UTF-8 conversion.
bitField0_ |= 0x00000800;
erasureCodingPolicy_ = other.erasureCodingPolicy_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
// Builder-side initialization check: mirrors the message's isInitialized()
// (required fields 1-6 present; nested message valid if set) but without
// memoization, since builder state is mutable.
public final boolean isInitialized() {
if (!hasLength()) {
return false;
}
if (!hasFileCount()) {
return false;
}
if (!hasDirectoryCount()) {
return false;
}
if (!hasQuota()) {
return false;
}
if (!hasSpaceConsumed()) {
return false;
}
if (!hasSpaceQuota()) {
return false;
}
if (hasTypeQuotaInfos()) {
if (!getTypeQuotaInfos().isInitialized()) {
return false;
}
}
return true;
}
// Stream merge: parses a full message then merges it in; on parse failure
// the partially parsed message (if any) is still merged before rethrowing.
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// Presence bitmap: bit 0x1 <=> field 1 (length) ... bit 0x800 <=> field 12.
private int bitField0_;
// --- field 1: length (presence bit 0x00000001) ---
// required uint64 length = 1;
private long length_ ;
/**
* required uint64 length = 1;
*/
public boolean hasLength() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint64 length = 1;
*/
public long getLength() {
return length_;
}
/**
* required uint64 length = 1;
*/
public Builder setLength(long value) {
bitField0_ |= 0x00000001;
length_ = value;
onChanged();
return this;
}
/**
* required uint64 length = 1;
*/
public Builder clearLength() {
bitField0_ = (bitField0_ & ~0x00000001);
length_ = 0L;
onChanged();
return this;
}
// --- field 2: fileCount (presence bit 0x00000002) ---
// required uint64 fileCount = 2;
private long fileCount_ ;
/**
* required uint64 fileCount = 2;
*/
public boolean hasFileCount() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 fileCount = 2;
*/
public long getFileCount() {
return fileCount_;
}
/**
* required uint64 fileCount = 2;
*/
public Builder setFileCount(long value) {
bitField0_ |= 0x00000002;
fileCount_ = value;
onChanged();
return this;
}
/**
* required uint64 fileCount = 2;
*/
public Builder clearFileCount() {
bitField0_ = (bitField0_ & ~0x00000002);
fileCount_ = 0L;
onChanged();
return this;
}
// --- field 3: directoryCount (presence bit 0x00000004) ---
// required uint64 directoryCount = 3;
private long directoryCount_ ;
/**
* required uint64 directoryCount = 3;
*/
public boolean hasDirectoryCount() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 directoryCount = 3;
*/
public long getDirectoryCount() {
return directoryCount_;
}
/**
* required uint64 directoryCount = 3;
*/
public Builder setDirectoryCount(long value) {
bitField0_ |= 0x00000004;
directoryCount_ = value;
onChanged();
return this;
}
/**
* required uint64 directoryCount = 3;
*/
public Builder clearDirectoryCount() {
bitField0_ = (bitField0_ & ~0x00000004);
directoryCount_ = 0L;
onChanged();
return this;
}
// --- field 4: quota (presence bit 0x00000008) ---
// required uint64 quota = 4;
private long quota_ ;
/**
* required uint64 quota = 4;
*/
public boolean hasQuota() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required uint64 quota = 4;
*/
public long getQuota() {
return quota_;
}
/**
* required uint64 quota = 4;
*/
public Builder setQuota(long value) {
bitField0_ |= 0x00000008;
quota_ = value;
onChanged();
return this;
}
/**
* required uint64 quota = 4;
*/
public Builder clearQuota() {
bitField0_ = (bitField0_ & ~0x00000008);
quota_ = 0L;
onChanged();
return this;
}
// --- field 5: spaceConsumed (presence bit 0x00000010) ---
// required uint64 spaceConsumed = 5;
private long spaceConsumed_ ;
/**
* required uint64 spaceConsumed = 5;
*/
public boolean hasSpaceConsumed() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required uint64 spaceConsumed = 5;
*/
public long getSpaceConsumed() {
return spaceConsumed_;
}
/**
* required uint64 spaceConsumed = 5;
*/
public Builder setSpaceConsumed(long value) {
bitField0_ |= 0x00000010;
spaceConsumed_ = value;
onChanged();
return this;
}
/**
* required uint64 spaceConsumed = 5;
*/
public Builder clearSpaceConsumed() {
bitField0_ = (bitField0_ & ~0x00000010);
spaceConsumed_ = 0L;
onChanged();
return this;
}
// --- field 6: spaceQuota (presence bit 0x00000020) ---
// required uint64 spaceQuota = 6;
private long spaceQuota_ ;
/**
* required uint64 spaceQuota = 6;
*/
public boolean hasSpaceQuota() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* required uint64 spaceQuota = 6;
*/
public long getSpaceQuota() {
return spaceQuota_;
}
/**
* required uint64 spaceQuota = 6;
*/
public Builder setSpaceQuota(long value) {
bitField0_ |= 0x00000020;
spaceQuota_ = value;
onChanged();
return this;
}
/**
* required uint64 spaceQuota = 6;
*/
public Builder clearSpaceQuota() {
bitField0_ = (bitField0_ & ~0x00000020);
spaceQuota_ = 0L;
onChanged();
return this;
}
// --- field 7: typeQuotaInfos (presence bit 0x00000040), message-typed ---
// While typeQuotaInfosBuilder_ is null the plain field holds the value;
// once a nested builder is requested, the builder becomes authoritative.
// optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto typeQuotaInfos_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder> typeQuotaInfosBuilder_;
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
public boolean hasTypeQuotaInfos() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos() {
if (typeQuotaInfosBuilder_ == null) {
return typeQuotaInfos_;
} else {
return typeQuotaInfosBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
public Builder setTypeQuotaInfos(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto value) {
if (typeQuotaInfosBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
typeQuotaInfos_ = value;
onChanged();
} else {
typeQuotaInfosBuilder_.setMessage(value);
}
bitField0_ |= 0x00000040;
return this;
}
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
public Builder setTypeQuotaInfos(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder builderForValue) {
if (typeQuotaInfosBuilder_ == null) {
typeQuotaInfos_ = builderForValue.build();
onChanged();
} else {
typeQuotaInfosBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000040;
return this;
}
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
public Builder mergeTypeQuotaInfos(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto value) {
if (typeQuotaInfosBuilder_ == null) {
// If a non-default value is already present, recursively merge into it;
// otherwise simply adopt 'value'.
if (((bitField0_ & 0x00000040) == 0x00000040) &&
typeQuotaInfos_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance()) {
typeQuotaInfos_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.newBuilder(typeQuotaInfos_).mergeFrom(value).buildPartial();
} else {
typeQuotaInfos_ = value;
}
onChanged();
} else {
typeQuotaInfosBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000040;
return this;
}
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
public Builder clearTypeQuotaInfos() {
if (typeQuotaInfosBuilder_ == null) {
typeQuotaInfos_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance();
onChanged();
} else {
typeQuotaInfosBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000040);
return this;
}
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder getTypeQuotaInfosBuilder() {
bitField0_ |= 0x00000040;
onChanged();
return getTypeQuotaInfosFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder() {
if (typeQuotaInfosBuilder_ != null) {
return typeQuotaInfosBuilder_.getMessageOrBuilder();
} else {
return typeQuotaInfos_;
}
}
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;
*/
// Lazily creates the nested-builder support; after this the plain field is
// nulled out and the SingleFieldBuilder owns the value.
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder>
getTypeQuotaInfosFieldBuilder() {
if (typeQuotaInfosBuilder_ == null) {
typeQuotaInfosBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder>(
typeQuotaInfos_,
getParentForChildren(),
isClean());
typeQuotaInfos_ = null;
}
return typeQuotaInfosBuilder_;
}
// --- field 8: snapshotLength (presence bit 0x00000080) ---
// optional uint64 snapshotLength = 8;
private long snapshotLength_ ;
/**
* optional uint64 snapshotLength = 8;
*/
public boolean hasSnapshotLength() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* optional uint64 snapshotLength = 8;
*/
public long getSnapshotLength() {
return snapshotLength_;
}
/**
* optional uint64 snapshotLength = 8;
*/
public Builder setSnapshotLength(long value) {
bitField0_ |= 0x00000080;
snapshotLength_ = value;
onChanged();
return this;
}
/**
* optional uint64 snapshotLength = 8;
*/
public Builder clearSnapshotLength() {
bitField0_ = (bitField0_ & ~0x00000080);
snapshotLength_ = 0L;
onChanged();
return this;
}
// --- field 9: snapshotFileCount (presence bit 0x00000100) ---
// optional uint64 snapshotFileCount = 9;
private long snapshotFileCount_ ;
/**
* optional uint64 snapshotFileCount = 9;
*/
public boolean hasSnapshotFileCount() {
return ((bitField0_ & 0x00000100) == 0x00000100);
}
/**
* optional uint64 snapshotFileCount = 9;
*/
public long getSnapshotFileCount() {
return snapshotFileCount_;
}
/**
* optional uint64 snapshotFileCount = 9;
*/
public Builder setSnapshotFileCount(long value) {
bitField0_ |= 0x00000100;
snapshotFileCount_ = value;
onChanged();
return this;
}
/**
* optional uint64 snapshotFileCount = 9;
*/
public Builder clearSnapshotFileCount() {
bitField0_ = (bitField0_ & ~0x00000100);
snapshotFileCount_ = 0L;
onChanged();
return this;
}
// --- field 10: snapshotDirectoryCount (presence bit 0x00000200) ---
// optional uint64 snapshotDirectoryCount = 10;
private long snapshotDirectoryCount_ ;
/**
* optional uint64 snapshotDirectoryCount = 10;
*/
public boolean hasSnapshotDirectoryCount() {
return ((bitField0_ & 0x00000200) == 0x00000200);
}
/**
* optional uint64 snapshotDirectoryCount = 10;
*/
public long getSnapshotDirectoryCount() {
return snapshotDirectoryCount_;
}
/**
* optional uint64 snapshotDirectoryCount = 10;
*/
public Builder setSnapshotDirectoryCount(long value) {
bitField0_ |= 0x00000200;
snapshotDirectoryCount_ = value;
onChanged();
return this;
}
/**
* optional uint64 snapshotDirectoryCount = 10;
*/
public Builder clearSnapshotDirectoryCount() {
bitField0_ = (bitField0_ & ~0x00000200);
snapshotDirectoryCount_ = 0L;
onChanged();
return this;
}
// --- field 11: snapshotSpaceConsumed (presence bit 0x00000400) ---
// optional uint64 snapshotSpaceConsumed = 11;
private long snapshotSpaceConsumed_ ;
/**
* optional uint64 snapshotSpaceConsumed = 11;
*/
public boolean hasSnapshotSpaceConsumed() {
return ((bitField0_ & 0x00000400) == 0x00000400);
}
/**
* optional uint64 snapshotSpaceConsumed = 11;
*/
public long getSnapshotSpaceConsumed() {
return snapshotSpaceConsumed_;
}
/**
* optional uint64 snapshotSpaceConsumed = 11;
*/
public Builder setSnapshotSpaceConsumed(long value) {
bitField0_ |= 0x00000400;
snapshotSpaceConsumed_ = value;
onChanged();
return this;
}
/**
* optional uint64 snapshotSpaceConsumed = 11;
*/
public Builder clearSnapshotSpaceConsumed() {
bitField0_ = (bitField0_ & ~0x00000400);
snapshotSpaceConsumed_ = 0L;
onChanged();
return this;
}
// --- field 12: erasureCodingPolicy (presence bit 0x00000800), string ---
// Stored as either a String or a ByteString; the accessors convert lazily
// and cache the converted form back into the field.
// optional string erasureCodingPolicy = 12;
private java.lang.Object erasureCodingPolicy_ = "";
/**
* optional string erasureCodingPolicy = 12;
*/
public boolean hasErasureCodingPolicy() {
return ((bitField0_ & 0x00000800) == 0x00000800);
}
/**
* optional string erasureCodingPolicy = 12;
*/
public java.lang.String getErasureCodingPolicy() {
java.lang.Object ref = erasureCodingPolicy_;
if (!(ref instanceof java.lang.String)) {
// Decode the ByteString once and cache the String for later calls.
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
erasureCodingPolicy_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string erasureCodingPolicy = 12;
*/
public com.google.protobuf.ByteString
getErasureCodingPolicyBytes() {
java.lang.Object ref = erasureCodingPolicy_;
if (ref instanceof String) {
// Encode the String once and cache the ByteString for later calls.
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
erasureCodingPolicy_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* optional string erasureCodingPolicy = 12;
*/
public Builder setErasureCodingPolicy(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000800;
erasureCodingPolicy_ = value;
onChanged();
return this;
}
/**
* optional string erasureCodingPolicy = 12;
*/
public Builder clearErasureCodingPolicy() {
bitField0_ = (bitField0_ & ~0x00000800);
// Reset to the default instance's value (the empty string).
erasureCodingPolicy_ = getDefaultInstance().getErasureCodingPolicy();
onChanged();
return this;
}
/**
* optional string erasureCodingPolicy = 12;
*/
public Builder setErasureCodingPolicyBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000800;
erasureCodingPolicy_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ContentSummaryProto)
}
static {
defaultInstance = new ContentSummaryProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ContentSummaryProto)
}
// Read-only view shared by QuotaUsageProto and its Builder; generated from
// the QuotaUsageProto message definition in hdfs.proto.
public interface QuotaUsageProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required uint64 fileAndDirectoryCount = 1;
/**
* required uint64 fileAndDirectoryCount = 1;
*/
boolean hasFileAndDirectoryCount();
/**
* required uint64 fileAndDirectoryCount = 1;
*/
long getFileAndDirectoryCount();
// required uint64 quota = 2;
/**
* required uint64 quota = 2;
*/
boolean hasQuota();
/**
* required uint64 quota = 2;
*/
long getQuota();
// required uint64 spaceConsumed = 3;
/**
* required uint64 spaceConsumed = 3;
*/
boolean hasSpaceConsumed();
/**
* required uint64 spaceConsumed = 3;
*/
long getSpaceConsumed();
// required uint64 spaceQuota = 4;
/**
* required uint64 spaceQuota = 4;
*/
boolean hasSpaceQuota();
/**
* required uint64 spaceQuota = 4;
*/
long getSpaceQuota();
// optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;
*/
boolean hasTypeQuotaInfos();
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos();
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.QuotaUsageProto}
*
*
**
* Summary of quota usage of a directory
*
*/
public static final class QuotaUsageProto extends
com.google.protobuf.GeneratedMessage
implements QuotaUsageProtoOrBuilder {
// Use QuotaUsageProto.newBuilder() to construct.
// NOTE(review): the scraped source had lost the generic argument here
// ("GeneratedMessage.Builder> builder" — invalid Java); restored to the
// wildcard form that protoc 2.5 emits for this constructor.
private QuotaUsageProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
  super(builder);
  // Capture any unknown fields accumulated on the builder.
  this.unknownFields = builder.getUnknownFields();
}
// Lightweight constructor used only for the singleton default instance;
// field defaults are applied separately via initFields() in the static block.
private QuotaUsageProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final QuotaUsageProto defaultInstance;
public static QuotaUsageProto getDefaultInstance() {
return defaultInstance;
}
public QuotaUsageProto getDefaultInstanceForType() {
return defaultInstance;
}
// Wire-format fields not recognized at parse time, preserved so they can be
// reserialized unchanged.
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
// Wire-format parsing constructor: reads tag/value pairs until EOF (tag 0)
// or an unparseable unknown field. Each case label is the full tag value,
// i.e. (field_number << 3) | wire_type — 8/16/24/32 are varints for fields
// 1-4, 42 is the length-delimited sub-message for field 5. The 'default'
// arm's position before the specific cases is irrelevant in a Java switch.
private QuotaUsageProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
fileAndDirectoryCount_ = input.readUInt64();
break;
}
case 16: {
bitField0_ |= 0x00000002;
quota_ = input.readUInt64();
break;
}
case 24: {
bitField0_ |= 0x00000004;
spaceConsumed_ = input.readUInt64();
break;
}
case 32: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder subBuilder = null;
bitField0_ |= 0x00000008;
spaceQuota_ = input.readUInt64();
break;
}
case 42: {
// If field 5 was already seen, merge the repeated occurrence into
// the existing value (last-one-wins merge semantics for messages).
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000010) == 0x00000010)) {
subBuilder = typeQuotaInfos_.toBuilder();
}
typeQuotaInfos_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(typeQuotaInfos_);
typeQuotaInfos_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000010;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
// Always freeze whatever was read, even on failure, so the unfinished
// message attached to the exception is usable.
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
// Descriptor plumbing wired up in the outer HdfsProtos class initializer.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_QuotaUsageProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_QuotaUsageProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.Builder.class);
}
// NOTE(review): the scraped source had lost the generic arguments on
// Parser/AbstractParser (they were swallowed as HTML tags); restored to the
// parameterized form protoc 2.5 emits so the anonymous parser type-checks.
public static com.google.protobuf.Parser<QuotaUsageProto> PARSER =
    new com.google.protobuf.AbstractParser<QuotaUsageProto>() {
  public QuotaUsageProto parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    // Delegates straight to the wire-format parsing constructor.
    return new QuotaUsageProto(input, extensionRegistry);
  }
};
@java.lang.Override
public com.google.protobuf.Parser<QuotaUsageProto> getParserForType() {
  return PARSER;
}
// Presence bitmap: bit 0x1 <=> field 1 ... bit 0x10 <=> field 5.
private int bitField0_;
// --- field 1: fileAndDirectoryCount ---
// required uint64 fileAndDirectoryCount = 1;
public static final int FILEANDDIRECTORYCOUNT_FIELD_NUMBER = 1;
private long fileAndDirectoryCount_;
/**
* required uint64 fileAndDirectoryCount = 1;
*/
public boolean hasFileAndDirectoryCount() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint64 fileAndDirectoryCount = 1;
*/
public long getFileAndDirectoryCount() {
return fileAndDirectoryCount_;
}
// --- field 2: quota ---
// required uint64 quota = 2;
public static final int QUOTA_FIELD_NUMBER = 2;
private long quota_;
/**
* required uint64 quota = 2;
*/
public boolean hasQuota() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 quota = 2;
*/
public long getQuota() {
return quota_;
}
// --- field 3: spaceConsumed ---
// required uint64 spaceConsumed = 3;
public static final int SPACECONSUMED_FIELD_NUMBER = 3;
private long spaceConsumed_;
/**
* required uint64 spaceConsumed = 3;
*/
public boolean hasSpaceConsumed() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 spaceConsumed = 3;
*/
public long getSpaceConsumed() {
return spaceConsumed_;
}
// --- field 4: spaceQuota ---
// required uint64 spaceQuota = 4;
public static final int SPACEQUOTA_FIELD_NUMBER = 4;
private long spaceQuota_;
/**
* required uint64 spaceQuota = 4;
*/
public boolean hasSpaceQuota() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required uint64 spaceQuota = 4;
*/
public long getSpaceQuota() {
return spaceQuota_;
}
// --- field 5: typeQuotaInfos (message-typed, immutable on this class) ---
// optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;
public static final int TYPEQUOTAINFOS_FIELD_NUMBER = 5;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto typeQuotaInfos_;
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;
*/
public boolean hasTypeQuotaInfos() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos() {
return typeQuotaInfos_;
}
/**
* optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder() {
return typeQuotaInfos_;
}
// Applies proto2 defaults; called from both the static block and the
// parsing constructor before any field is read.
private void initFields() {
fileAndDirectoryCount_ = 0L;
quota_ = 0L;
spaceConsumed_ = 0L;
spaceQuota_ = 0L;
typeQuotaInfos_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance();
}
// Memoized tri-state: -1 unknown, 0 not initialized, 1 initialized.
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasFileAndDirectoryCount()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasQuota()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasSpaceConsumed()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasSpaceQuota()) {
memoizedIsInitialized = 0;
return false;
}
if (hasTypeQuotaInfos()) {
if (!getTypeQuotaInfos().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
// Serializes only fields whose presence bits are set, in field-number order,
// followed by any unknown fields.
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
// Forces the size to be memoized before writing (required by the format).
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt64(1, fileAndDirectoryCount_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, quota_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt64(3, spaceConsumed_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt64(4, spaceQuota_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeMessage(5, typeQuotaInfos_);
}
getUnknownFields().writeTo(output);
}
// Memoized wire size (-1 until first computed); mirrors writeTo exactly.
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(1, fileAndDirectoryCount_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, quota_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, spaceConsumed_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(4, spaceQuota_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(5, typeQuotaInfos_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
// Value equality: same presence bits, same field values, same unknown
// fields. Non-QuotaUsageProto arguments fall back to Object identity.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto) obj;
boolean result = true;
result = result && (hasFileAndDirectoryCount() == other.hasFileAndDirectoryCount());
if (hasFileAndDirectoryCount()) {
result = result && (getFileAndDirectoryCount()
== other.getFileAndDirectoryCount());
}
result = result && (hasQuota() == other.hasQuota());
if (hasQuota()) {
result = result && (getQuota()
== other.getQuota());
}
result = result && (hasSpaceConsumed() == other.hasSpaceConsumed());
if (hasSpaceConsumed()) {
result = result && (getSpaceConsumed()
== other.getSpaceConsumed());
}
result = result && (hasSpaceQuota() == other.hasSpaceQuota());
if (hasSpaceQuota()) {
result = result && (getSpaceQuota()
== other.getSpaceQuota());
}
result = result && (hasTypeQuotaInfos() == other.hasTypeQuotaInfos());
if (hasTypeQuotaInfos()) {
result = result && getTypeQuotaInfos()
.equals(other.getTypeQuotaInfos());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
// Hash over set fields only (field number then value), memoized since the
// message is immutable; consistent with equals above.
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasFileAndDirectoryCount()) {
hash = (37 * hash) + FILEANDDIRECTORYCOUNT_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getFileAndDirectoryCount());
}
if (hasQuota()) {
hash = (37 * hash) + QUOTA_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getQuota());
}
if (hasSpaceConsumed()) {
hash = (37 * hash) + SPACECONSUMED_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getSpaceConsumed());
}
if (hasSpaceQuota()) {
hash = (37 * hash) + SPACEQUOTA_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getSpaceQuota());
}
if (hasTypeQuotaInfos()) {
hash = (37 * hash) + TYPEQUOTAINFOS_FIELD_NUMBER;
hash = (53 * hash) + getTypeQuotaInfos().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// Convenience parse entry points; all delegate to PARSER. The 'parseFrom'
// variants read one complete message; 'parseDelimitedFrom' reads a
// varint-length-prefixed message from the stream.
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Builder factories: a fresh builder, a builder pre-populated from a
// prototype, and the parent-aware variant used for nested builders.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.QuotaUsageProto}
*
*
**
* Summary of quota usage of a directory
*
*/
// NOTE(review): the scraped source had lost the generic argument on the
// superclass ("extends GeneratedMessage.Builder" raw); restored to the
// self-referential form protoc 2.5 emits so fluent super methods return
// Builder.
public static final class Builder extends
    com.google.protobuf.GeneratedMessage.Builder<Builder>
   implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProtoOrBuilder {
// Descriptor plumbing wired up in the outer HdfsProtos class initializer.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_QuotaUsageProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_QuotaUsageProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
// Eagerly creates nested field builders when the runtime requires it
// (alwaysUseFieldBuilders is set for nested-builder support).
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getTypeQuotaInfosFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
// Resets every field to its proto2 default and clears all presence bits.
public Builder clear() {
super.clear();
fileAndDirectoryCount_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
quota_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
spaceConsumed_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
spaceQuota_ = 0L;
bitField0_ = (bitField0_ & ~0x00000008);
if (typeQuotaInfosBuilder_ == null) {
typeQuotaInfos_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance();
} else {
typeQuotaInfosBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_QuotaUsageProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.getDefaultInstance();
}
// Strict build: throws UninitializedMessageException if any required field
// is unset; use buildPartial() to skip that check.
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
// Copies all field values and translates the builder's presence bits into
// the message's bitField0_ without enforcing required-field constraints.
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.fileAndDirectoryCount_ = fileAndDirectoryCount_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.quota_ = quota_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.spaceConsumed_ = spaceConsumed_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.spaceQuota_ = spaceQuota_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
if (typeQuotaInfosBuilder_ == null) {
result.typeQuotaInfos_ = typeQuotaInfos_;
} else {
result.typeQuotaInfos_ = typeQuotaInfosBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
// Generic merge entry point: dispatches to the typed overload when 'other'
// is a QuotaUsageProto, else defers to the reflective superclass merge.
public Builder mergeFrom(com.google.protobuf.Message other) {
  if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto) {
    return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto)other);
  } else {
    super.mergeFrom(other);
    return this;
  }
}
// Field-by-field merge: only fields explicitly set on 'other' overwrite
// (scalars) or recursively merge into (nested message) this builder.
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto other) {
  if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.getDefaultInstance()) return this;
  if (other.hasFileAndDirectoryCount()) {
    setFileAndDirectoryCount(other.getFileAndDirectoryCount());
  }
  if (other.hasQuota()) {
    setQuota(other.getQuota());
  }
  if (other.hasSpaceConsumed()) {
    setSpaceConsumed(other.getSpaceConsumed());
  }
  if (other.hasSpaceQuota()) {
    setSpaceQuota(other.getSpaceQuota());
  }
  if (other.hasTypeQuotaInfos()) {
    mergeTypeQuotaInfos(other.getTypeQuotaInfos());
  }
  this.mergeUnknownFields(other.getUnknownFields());
  return this;
}
// All four scalar fields are 'required' in hdfs.proto; the nested
// typeQuotaInfos message is optional but, when present, must itself be
// fully initialized.
public final boolean isInitialized() {
  if (!hasFileAndDirectoryCount()) {
    return false;
  }
  if (!hasQuota()) {
    return false;
  }
  if (!hasSpaceConsumed()) {
    return false;
  }
  if (!hasSpaceQuota()) {
    return false;
  }
  if (hasTypeQuotaInfos()) {
    if (!getTypeQuotaInfos().isInitialized()) {
      return false;
    }
  }
  return true;
}
// Stream merge: parses one complete message then merges it in. On a parse
// failure, the partially-parsed message (if any) is merged in the finally
// block before the exception propagates.
public Builder mergeFrom(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parsedMessage = null;
  try {
    parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
    parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto) e.getUnfinishedMessage();
    throw e;
  } finally {
    if (parsedMessage != null) {
      mergeFrom(parsedMessage);
    }
  }
  return this;
}
// Has-bits for the five fields; bit 0x01..0x10 correspond to fields 1..5.
private int bitField0_;
// required uint64 fileAndDirectoryCount = 1;
private long fileAndDirectoryCount_ ;
/**
 * required uint64 fileAndDirectoryCount = 1;
 */
public boolean hasFileAndDirectoryCount() {
  return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
 * required uint64 fileAndDirectoryCount = 1;
 */
public long getFileAndDirectoryCount() {
  return fileAndDirectoryCount_;
}
/**
 * required uint64 fileAndDirectoryCount = 1;
 */
public Builder setFileAndDirectoryCount(long value) {
  bitField0_ |= 0x00000001;
  fileAndDirectoryCount_ = value;
  onChanged();
  return this;
}
/**
 * required uint64 fileAndDirectoryCount = 1;
 */
public Builder clearFileAndDirectoryCount() {
  bitField0_ = (bitField0_ & ~0x00000001);
  fileAndDirectoryCount_ = 0L;
  onChanged();
  return this;
}
// required uint64 quota = 2;
private long quota_ ;
/**
 * required uint64 quota = 2;
 */
public boolean hasQuota() {
  return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
 * required uint64 quota = 2;
 */
public long getQuota() {
  return quota_;
}
/**
 * required uint64 quota = 2;
 */
public Builder setQuota(long value) {
  bitField0_ |= 0x00000002;
  quota_ = value;
  onChanged();
  return this;
}
/**
 * required uint64 quota = 2;
 */
public Builder clearQuota() {
  bitField0_ = (bitField0_ & ~0x00000002);
  quota_ = 0L;
  onChanged();
  return this;
}
// required uint64 spaceConsumed = 3;
private long spaceConsumed_ ;
/**
 * required uint64 spaceConsumed = 3;
 */
public boolean hasSpaceConsumed() {
  return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
 * required uint64 spaceConsumed = 3;
 */
public long getSpaceConsumed() {
  return spaceConsumed_;
}
/**
 * required uint64 spaceConsumed = 3;
 */
public Builder setSpaceConsumed(long value) {
  bitField0_ |= 0x00000004;
  spaceConsumed_ = value;
  onChanged();
  return this;
}
/**
 * required uint64 spaceConsumed = 3;
 */
public Builder clearSpaceConsumed() {
  bitField0_ = (bitField0_ & ~0x00000004);
  spaceConsumed_ = 0L;
  onChanged();
  return this;
}
// required uint64 spaceQuota = 4;
private long spaceQuota_ ;
/**
 * required uint64 spaceQuota = 4;
 */
public boolean hasSpaceQuota() {
  return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
 * required uint64 spaceQuota = 4;
 */
public long getSpaceQuota() {
  return spaceQuota_;
}
/**
 * required uint64 spaceQuota = 4;
 */
public Builder setSpaceQuota(long value) {
  bitField0_ |= 0x00000008;
  spaceQuota_ = value;
  onChanged();
  return this;
}
/**
 * required uint64 spaceQuota = 4;
 */
public Builder clearSpaceQuota() {
  bitField0_ = (bitField0_ & ~0x00000008);
  spaceQuota_ = 0L;
  onChanged();
  return this;
}
// optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;
// Until getTypeQuotaInfosBuilder() is first called, the value lives in
// typeQuotaInfos_; afterwards ownership moves to typeQuotaInfosBuilder_
// and typeQuotaInfos_ is nulled out (see getTypeQuotaInfosFieldBuilder()).
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto typeQuotaInfos_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder> typeQuotaInfosBuilder_;
/**
 * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;
 */
public boolean hasTypeQuotaInfos() {
  return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
 * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos() {
  if (typeQuotaInfosBuilder_ == null) {
    return typeQuotaInfos_;
  } else {
    return typeQuotaInfosBuilder_.getMessage();
  }
}
/**
 * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;
 */
public Builder setTypeQuotaInfos(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto value) {
  if (typeQuotaInfosBuilder_ == null) {
    if (value == null) {
      throw new NullPointerException();
    }
    typeQuotaInfos_ = value;
    onChanged();
  } else {
    typeQuotaInfosBuilder_.setMessage(value);
  }
  bitField0_ |= 0x00000010;
  return this;
}
/**
 * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;
 */
public Builder setTypeQuotaInfos(
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder builderForValue) {
  if (typeQuotaInfosBuilder_ == null) {
    typeQuotaInfos_ = builderForValue.build();
    onChanged();
  } else {
    typeQuotaInfosBuilder_.setMessage(builderForValue.build());
  }
  bitField0_ |= 0x00000010;
  return this;
}
/**
 * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;
 */
// Merges 'value' into any existing non-default value; otherwise replaces.
public Builder mergeTypeQuotaInfos(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto value) {
  if (typeQuotaInfosBuilder_ == null) {
    if (((bitField0_ & 0x00000010) == 0x00000010) &&
        typeQuotaInfos_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance()) {
      typeQuotaInfos_ =
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.newBuilder(typeQuotaInfos_).mergeFrom(value).buildPartial();
    } else {
      typeQuotaInfos_ = value;
    }
    onChanged();
  } else {
    typeQuotaInfosBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000010;
  return this;
}
/**
 * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;
 */
public Builder clearTypeQuotaInfos() {
  if (typeQuotaInfosBuilder_ == null) {
    typeQuotaInfos_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance();
    onChanged();
  } else {
    typeQuotaInfosBuilder_.clear();
  }
  bitField0_ = (bitField0_ & ~0x00000010);
  return this;
}
/**
 * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder getTypeQuotaInfosBuilder() {
  bitField0_ |= 0x00000010;
  onChanged();
  return getTypeQuotaInfosFieldBuilder().getBuilder();
}
/**
 * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder() {
  if (typeQuotaInfosBuilder_ != null) {
    return typeQuotaInfosBuilder_.getMessageOrBuilder();
  } else {
    return typeQuotaInfos_;
  }
}
/**
 * optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;
 */
// Lazily creates the SingleFieldBuilder, seeding it with the current value
// and transferring ownership of the field to it.
private com.google.protobuf.SingleFieldBuilder<
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder>
    getTypeQuotaInfosFieldBuilder() {
  if (typeQuotaInfosBuilder_ == null) {
    typeQuotaInfosBuilder_ = new com.google.protobuf.SingleFieldBuilder<
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder>(
            typeQuotaInfos_,
            getParentForChildren(),
            isClean());
    typeQuotaInfos_ = null;
  }
  return typeQuotaInfosBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.QuotaUsageProto)
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.QuotaUsageProto)
}
// Creates the shared singleton default instance for QuotaUsageProto.
static {
  defaultInstance = new QuotaUsageProto(true);
  defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.QuotaUsageProto)
}
public interface StorageTypeQuotaInfosProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
java.util.List
getTypeQuotaInfoList();
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getTypeQuotaInfo(int index);
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
int getTypeQuotaInfoCount();
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
java.util.List extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder>
getTypeQuotaInfoOrBuilderList();
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder getTypeQuotaInfoOrBuilder(
int index);
}
/**
* Protobuf type {@code hadoop.hdfs.StorageTypeQuotaInfosProto}
*
*
**
* Storage type quota and usage information of a file or directory
*
*/
public static final class StorageTypeQuotaInfosProto extends
com.google.protobuf.GeneratedMessage
implements StorageTypeQuotaInfosProtoOrBuilder {
// Use StorageTypeQuotaInfosProto.newBuilder() to construct.
// Wildcard on the builder parameter restored: the extracted copy had the
// invalid "GeneratedMessage.Builder> builder".
private StorageTypeQuotaInfosProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
  super(builder);
  this.unknownFields = builder.getUnknownFields();
}
// No-init constructor used only for the shared default instance.
private StorageTypeQuotaInfosProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final StorageTypeQuotaInfosProto defaultInstance;
public static StorageTypeQuotaInfosProto getDefaultInstance() {
  return defaultInstance;
}
public StorageTypeQuotaInfosProto getDefaultInstanceForType() {
  return defaultInstance;
}
// Unknown fields seen during parsing are preserved and re-serialized.
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
    getUnknownFields() {
  return this.unknownFields;
}
// Wire-format parsing constructor: reads tags until EOF (tag 0), collecting
// field-1 submessages and preserving any unknown fields. The repeated list
// is made unmodifiable in the finally block even on parse failure.
private StorageTypeQuotaInfosProto(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  initFields();
  int mutable_bitField0_ = 0;
  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
      com.google.protobuf.UnknownFieldSet.newBuilder();
  try {
    boolean done = false;
    while (!done) {
      int tag = input.readTag();
      switch (tag) {
        case 0:
          done = true;
          break;
        default: {
          if (!parseUnknownField(input, unknownFields,
                                 extensionRegistry, tag)) {
            done = true;
          }
          break;
        }
        case 10: {
          // First element: lazily switch from the immutable empty list to a
          // mutable one. Element type restored (was a raw ArrayList).
          if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
            typeQuotaInfo_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto>();
            mutable_bitField0_ |= 0x00000001;
          }
          typeQuotaInfo_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.PARSER, extensionRegistry));
          break;
        }
      }
    }
  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
    throw e.setUnfinishedMessage(this);
  } catch (java.io.IOException e) {
    throw new com.google.protobuf.InvalidProtocolBufferException(
        e.getMessage()).setUnfinishedMessage(this);
  } finally {
    if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
      typeQuotaInfo_ = java.util.Collections.unmodifiableList(typeQuotaInfo_);
    }
    this.unknownFields = unknownFields.build();
    makeExtensionsImmutable();
  }
}
public static final com.google.protobuf.Descriptors.Descriptor
    getDescriptor() {
  return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
    internalGetFieldAccessorTable() {
  return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_fieldAccessorTable
      .ensureFieldAccessorsInitialized(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder.class);
}
// Type parameters restored; the extracted copy declared a raw Parser and
// raw AbstractParser, which lose compile-time type safety.
public static com.google.protobuf.Parser<StorageTypeQuotaInfosProto> PARSER =
    new com.google.protobuf.AbstractParser<StorageTypeQuotaInfosProto>() {
  public StorageTypeQuotaInfosProto parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return new StorageTypeQuotaInfosProto(input, extensionRegistry);
  }
};
@java.lang.Override
public com.google.protobuf.Parser<StorageTypeQuotaInfosProto> getParserForType() {
  return PARSER;
}
// repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
public static final int TYPEQUOTAINFO_FIELD_NUMBER = 1;
private java.util.List typeQuotaInfo_;
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public java.util.List getTypeQuotaInfoList() {
return typeQuotaInfo_;
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public java.util.List extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder>
getTypeQuotaInfoOrBuilderList() {
return typeQuotaInfo_;
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public int getTypeQuotaInfoCount() {
return typeQuotaInfo_.size();
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getTypeQuotaInfo(int index) {
return typeQuotaInfo_.get(index);
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder getTypeQuotaInfoOrBuilder(
int index) {
return typeQuotaInfo_.get(index);
}
private void initFields() {
typeQuotaInfo_ = java.util.Collections.emptyList();
}
// Memoized required-field check result: -1 = unknown, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
// Initialized iff every element of the repeated field is initialized.
public final boolean isInitialized() {
  byte isInitialized = memoizedIsInitialized;
  if (isInitialized != -1) return isInitialized == 1;
  for (int i = 0; i < getTypeQuotaInfoCount(); i++) {
    if (!getTypeQuotaInfo(i).isInitialized()) {
      memoizedIsInitialized = 0;
      return false;
    }
  }
  memoizedIsInitialized = 1;
  return true;
}
// Serializes each element as field #1, then any preserved unknown fields.
public void writeTo(com.google.protobuf.CodedOutputStream output)
    throws java.io.IOException {
  getSerializedSize();
  for (int i = 0; i < typeQuotaInfo_.size(); i++) {
    output.writeMessage(1, typeQuotaInfo_.get(i));
  }
  getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
// Memoized wire size: sum of each element's message size plus unknown fields.
public int getSerializedSize() {
  int size = memoizedSerializedSize;
  if (size != -1) return size;
  size = 0;
  for (int i = 0; i < typeQuotaInfo_.size(); i++) {
    size += com.google.protobuf.CodedOutputStream
        .computeMessageSize(1, typeQuotaInfo_.get(i));
  }
  size += getUnknownFields().getSerializedSize();
  memoizedSerializedSize = size;
  return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
    throws java.io.ObjectStreamException {
  return super.writeReplace();
}
// Value equality: element-wise list comparison plus unknown fields.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto)) {
    return super.equals(obj);
  }
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto) obj;
  boolean result = true;
  result = result && getTypeQuotaInfoList()
      .equals(other.getTypeQuotaInfoList());
  result = result &&
      getUnknownFields().equals(other.getUnknownFields());
  return result;
}
private int memoizedHashCode = 0;
// Memoized hash consistent with equals(); mixes in the field number only
// when the repeated field is non-empty.
@java.lang.Override
public int hashCode() {
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  int hash = 41;
  hash = (19 * hash) + getDescriptorForType().hashCode();
  if (getTypeQuotaInfoCount() > 0) {
    hash = (37 * hash) + TYPEQUOTAINFO_FIELD_NUMBER;
    hash = (53 * hash) + getTypeQuotaInfoList().hashCode();
  }
  hash = (29 * hash) + getUnknownFields().hashCode();
  memoizedHashCode = hash;
  return hash;
}
// Static parse entry points: all delegate to PARSER for each input kind
// (ByteString, byte[], InputStream, delimited stream, CodedInputStream),
// with and without an extension registry.
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(
    com.google.protobuf.ByteString data)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(
    com.google.protobuf.ByteString data,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(byte[] data)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(
    byte[] data,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(java.io.InputStream input)
    throws java.io.IOException {
  return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(
    java.io.InputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseDelimitedFrom(java.io.InputStream input)
    throws java.io.IOException {
  return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseDelimitedFrom(
    java.io.InputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(
    com.google.protobuf.CodedInputStream input)
    throws java.io.IOException {
  return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return PARSER.parseFrom(input, extensionRegistry);
}
// Builder factory methods: a fresh builder, a builder pre-merged with a
// prototype, and a builder seeded from this instance.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto prototype) {
  return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
    com.google.protobuf.GeneratedMessage.BuilderParent parent) {
  Builder builder = new Builder(parent);
  return builder;
}
/**
 * Protobuf type {@code hadoop.hdfs.StorageTypeQuotaInfosProto}
 *
 * Storage type quota and usage information of a file or directory
 */
public static final class Builder extends
    // Self-referential type parameter restored; the extracted copy declared
    // a raw GeneratedMessage.Builder.
    com.google.protobuf.GeneratedMessage.Builder<Builder>
    implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder {
  public static final com.google.protobuf.Descriptors.Descriptor
      getDescriptor() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor;
  }
  protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder.class);
  }
  // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.newBuilder()
  private Builder() {
    maybeForceBuilderInitialization();
  }
  private Builder(
      com.google.protobuf.GeneratedMessage.BuilderParent parent) {
    super(parent);
    maybeForceBuilderInitialization();
  }
  // Eagerly creates the repeated-field builder when the runtime requires it.
  private void maybeForceBuilderInitialization() {
    if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
      getTypeQuotaInfoFieldBuilder();
    }
  }
  private static Builder create() {
    return new Builder();
  }
// Resets the repeated field to empty (or clears the field builder).
public Builder clear() {
  super.clear();
  if (typeQuotaInfoBuilder_ == null) {
    typeQuotaInfo_ = java.util.Collections.emptyList();
    bitField0_ = (bitField0_ & ~0x00000001);
  } else {
    typeQuotaInfoBuilder_.clear();
  }
  return this;
}
public Builder clone() {
  return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
    getDescriptorForType() {
  return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getDefaultInstanceForType() {
  return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance();
}
// Builds the message, throwing if any element is missing required fields.
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto build() {
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto result = buildPartial();
  if (!result.isInitialized()) {
    throw newUninitializedMessageException(result);
  }
  return result;
}
// Builds without the required-field check; freezes the list in place so the
// built message shares it immutably with this builder.
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto buildPartial() {
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto(this);
  int from_bitField0_ = bitField0_;
  if (typeQuotaInfoBuilder_ == null) {
    if (((bitField0_ & 0x00000001) == 0x00000001)) {
      typeQuotaInfo_ = java.util.Collections.unmodifiableList(typeQuotaInfo_);
      bitField0_ = (bitField0_ & ~0x00000001);
    }
    result.typeQuotaInfo_ = typeQuotaInfo_;
  } else {
    result.typeQuotaInfo_ = typeQuotaInfoBuilder_.build();
  }
  onBuilt();
  return result;
}
// Generic merge entry point: dispatches to the typed overload when possible.
public Builder mergeFrom(com.google.protobuf.Message other) {
  if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto) {
    return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto)other);
  } else {
    super.mergeFrom(other);
    return this;
  }
}
// Appends the other message's elements. When this builder is empty it
// adopts the other's (immutable) list directly instead of copying; when a
// field builder exists the same optimization disposes and re-creates it.
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto other) {
  if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance()) return this;
  if (typeQuotaInfoBuilder_ == null) {
    if (!other.typeQuotaInfo_.isEmpty()) {
      if (typeQuotaInfo_.isEmpty()) {
        typeQuotaInfo_ = other.typeQuotaInfo_;
        bitField0_ = (bitField0_ & ~0x00000001);
      } else {
        ensureTypeQuotaInfoIsMutable();
        typeQuotaInfo_.addAll(other.typeQuotaInfo_);
      }
      onChanged();
    }
  } else {
    if (!other.typeQuotaInfo_.isEmpty()) {
      if (typeQuotaInfoBuilder_.isEmpty()) {
        typeQuotaInfoBuilder_.dispose();
        typeQuotaInfoBuilder_ = null;
        typeQuotaInfo_ = other.typeQuotaInfo_;
        bitField0_ = (bitField0_ & ~0x00000001);
        typeQuotaInfoBuilder_ =
          com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
             getTypeQuotaInfoFieldBuilder() : null;
      } else {
        typeQuotaInfoBuilder_.addAllMessages(other.typeQuotaInfo_);
      }
    }
  }
  this.mergeUnknownFields(other.getUnknownFields());
  return this;
}
// Initialized iff every element of the repeated field is initialized.
public final boolean isInitialized() {
  for (int i = 0; i < getTypeQuotaInfoCount(); i++) {
    if (!getTypeQuotaInfo(i).isInitialized()) {
      return false;
    }
  }
  return true;
}
// Stream merge: parses one complete message then merges it; a partially
// parsed message is still merged (in finally) before rethrowing.
public Builder mergeFrom(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parsedMessage = null;
  try {
    parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
    parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto) e.getUnfinishedMessage();
    throw e;
  } finally {
    if (parsedMessage != null) {
      mergeFrom(parsedMessage);
    }
  }
  return this;
}
// Bit 0x01 tracks whether typeQuotaInfo_ is a private mutable copy.
private int bitField0_;

// repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
// Element types restored on the list field, the copy-on-write helper and
// the getter; the extracted copy had raw List/ArrayList declarations.
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto> typeQuotaInfo_ =
  java.util.Collections.emptyList();
// Copy-on-write: replaces the possibly-shared immutable list with a private
// mutable copy before the first in-place mutation.
private void ensureTypeQuotaInfoIsMutable() {
  if (!((bitField0_ & 0x00000001) == 0x00000001)) {
    typeQuotaInfo_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto>(typeQuotaInfo_);
    bitField0_ |= 0x00000001;
  }
}
private com.google.protobuf.RepeatedFieldBuilder<
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder> typeQuotaInfoBuilder_;
/**
 * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
 */
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto> getTypeQuotaInfoList() {
  if (typeQuotaInfoBuilder_ == null) {
    return java.util.Collections.unmodifiableList(typeQuotaInfo_);
  } else {
    return typeQuotaInfoBuilder_.getMessageList();
  }
}
/**
 * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
 */
public int getTypeQuotaInfoCount() {
  if (typeQuotaInfoBuilder_ == null) {
    return typeQuotaInfo_.size();
  } else {
    return typeQuotaInfoBuilder_.getCount();
  }
}
/**
 * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getTypeQuotaInfo(int index) {
  if (typeQuotaInfoBuilder_ == null) {
    return typeQuotaInfo_.get(index);
  } else {
    return typeQuotaInfoBuilder_.getMessage(index);
  }
}
/**
 * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
 */
// Each mutator below follows the same pattern: when no field builder exists,
// mutate the local list copy-on-write and call onChanged(); otherwise
// delegate to the RepeatedFieldBuilder.
public Builder setTypeQuotaInfo(
    int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto value) {
  if (typeQuotaInfoBuilder_ == null) {
    if (value == null) {
      throw new NullPointerException();
    }
    ensureTypeQuotaInfoIsMutable();
    typeQuotaInfo_.set(index, value);
    onChanged();
  } else {
    typeQuotaInfoBuilder_.setMessage(index, value);
  }
  return this;
}
/**
 * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
 */
public Builder setTypeQuotaInfo(
    int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder builderForValue) {
  if (typeQuotaInfoBuilder_ == null) {
    ensureTypeQuotaInfoIsMutable();
    typeQuotaInfo_.set(index, builderForValue.build());
    onChanged();
  } else {
    typeQuotaInfoBuilder_.setMessage(index, builderForValue.build());
  }
  return this;
}
/**
 * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
 */
public Builder addTypeQuotaInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto value) {
  if (typeQuotaInfoBuilder_ == null) {
    if (value == null) {
      throw new NullPointerException();
    }
    ensureTypeQuotaInfoIsMutable();
    typeQuotaInfo_.add(value);
    onChanged();
  } else {
    typeQuotaInfoBuilder_.addMessage(value);
  }
  return this;
}
/**
 * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
 */
public Builder addTypeQuotaInfo(
    int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto value) {
  if (typeQuotaInfoBuilder_ == null) {
    if (value == null) {
      throw new NullPointerException();
    }
    ensureTypeQuotaInfoIsMutable();
    typeQuotaInfo_.add(index, value);
    onChanged();
  } else {
    typeQuotaInfoBuilder_.addMessage(index, value);
  }
  return this;
}
/**
 * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
 */
public Builder addTypeQuotaInfo(
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder builderForValue) {
  if (typeQuotaInfoBuilder_ == null) {
    ensureTypeQuotaInfoIsMutable();
    typeQuotaInfo_.add(builderForValue.build());
    onChanged();
  } else {
    typeQuotaInfoBuilder_.addMessage(builderForValue.build());
  }
  return this;
}
/**
 * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
 */
public Builder addTypeQuotaInfo(
    int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder builderForValue) {
  if (typeQuotaInfoBuilder_ == null) {
    ensureTypeQuotaInfoIsMutable();
    typeQuotaInfo_.add(index, builderForValue.build());
    onChanged();
  } else {
    typeQuotaInfoBuilder_.addMessage(index, builderForValue.build());
  }
  return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public Builder addAllTypeQuotaInfo(
java.lang.Iterable extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto> values) {
if (typeQuotaInfoBuilder_ == null) {
ensureTypeQuotaInfoIsMutable();
super.addAll(values, typeQuotaInfo_);
onChanged();
} else {
typeQuotaInfoBuilder_.addAllMessages(values);
}
return this;
}
/**
 * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
 */
// Resets the repeated field to empty (or clears the field builder).
public Builder clearTypeQuotaInfo() {
  if (typeQuotaInfoBuilder_ == null) {
    typeQuotaInfo_ = java.util.Collections.emptyList();
    bitField0_ = (bitField0_ & ~0x00000001);
    onChanged();
  } else {
    typeQuotaInfoBuilder_.clear();
  }
  return this;
}
/**
 * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
 */
public Builder removeTypeQuotaInfo(int index) {
  if (typeQuotaInfoBuilder_ == null) {
    ensureTypeQuotaInfoIsMutable();
    typeQuotaInfo_.remove(index);
    onChanged();
  } else {
    typeQuotaInfoBuilder_.remove(index);
  }
  return this;
}
/**
 * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
 */
// Returns a mutable sub-builder for the element; forces creation of the
// field builder.
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder getTypeQuotaInfoBuilder(
    int index) {
  return getTypeQuotaInfoFieldBuilder().getBuilder(index);
}
/**
 * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder getTypeQuotaInfoOrBuilder(
    int index) {
  if (typeQuotaInfoBuilder_ == null) {
    return typeQuotaInfo_.get(index); } else {
    return typeQuotaInfoBuilder_.getMessageOrBuilder(index);
  }
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*/
public java.util.List extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder>
getTypeQuotaInfoOrBuilderList() {
if (typeQuotaInfoBuilder_ != null) {
return typeQuotaInfoBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(typeQuotaInfo_);
}
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*
* Appends a new element initialized to the default instance and returns its
* mutable sub-builder (forces builder mode).
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder addTypeQuotaInfoBuilder() {
return getTypeQuotaInfoFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
*
* Same as above, but inserts the new default-initialized element at
* {@code index}.
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder addTypeQuotaInfoBuilder(
int index) {
return getTypeQuotaInfoFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.getDefaultInstance());
}
/**
 * repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;
 *
 * Mutable sub-builders for every element. The element type argument on the
 * returned List was stripped during transcription and is restored here.
 */
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder>
    getTypeQuotaInfoBuilderList() {
  return getTypeQuotaInfoFieldBuilder().getBuilderList();
}
// Lazily creates the RepeatedFieldBuilder on first use and hands the backing
// list over to it; typeQuotaInfo_ is nulled so there is a single owner of the
// element storage from then on.
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder>
getTypeQuotaInfoFieldBuilder() {
if (typeQuotaInfoBuilder_ == null) {
typeQuotaInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder>(
typeQuotaInfo_,
// Bit 0x1 tells the builder whether the list handed over is mutable.
((bitField0_ & 0x00000001) == 0x00000001),
getParentForChildren(),
isClean());
typeQuotaInfo_ = null;
}
return typeQuotaInfoBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageTypeQuotaInfosProto)
}
static {
// Eagerly build the shared immutable default instance returned by
// getDefaultInstance().
defaultInstance = new StorageTypeQuotaInfosProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageTypeQuotaInfosProto)
}
/**
 * Read accessor contract for {@code hadoop.hdfs.StorageTypeQuotaInfoProto}:
 * quota and consumption for a single storage type. All three fields are
 * declared {@code required} in hdfs.proto.
 */
public interface StorageTypeQuotaInfoProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.StorageTypeProto type = 1;
/**
* required .hadoop.hdfs.StorageTypeProto type = 1;
*/
boolean hasType();
/**
* required .hadoop.hdfs.StorageTypeProto type = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getType();
// required uint64 quota = 2;
/**
* required uint64 quota = 2;
*/
boolean hasQuota();
/**
* required uint64 quota = 2;
*/
long getQuota();
// required uint64 consumed = 3;
/**
* required uint64 consumed = 3;
*/
boolean hasConsumed();
/**
* required uint64 consumed = 3;
*/
long getConsumed();
}
/**
* Protobuf type {@code hadoop.hdfs.StorageTypeQuotaInfoProto}
*/
public static final class StorageTypeQuotaInfoProto extends
com.google.protobuf.GeneratedMessage
implements StorageTypeQuotaInfoProtoOrBuilder {
// Use StorageTypeQuotaInfoProto.newBuilder() to construct.
// The Builder<?> wildcard on the parameter was stripped during transcription
// and is restored here.
private StorageTypeQuotaInfoProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
  super(builder);
  this.unknownFields = builder.getUnknownFields();
}
// Default-instance constructor: nothing parsed, empty unknown-field set.
private StorageTypeQuotaInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final StorageTypeQuotaInfoProto defaultInstance;
public static StorageTypeQuotaInfoProto getDefaultInstance() {
  return defaultInstance;
}
public StorageTypeQuotaInfoProto getDefaultInstanceForType() {
  return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
    getUnknownFields() {
  return this.unknownFields;
}
// Stream-parsing constructor used by PARSER: reads tag/value pairs until
// end-of-stream (tag 0) or an unparseable field, keeping unrecognized data in
// unknownFields.
private StorageTypeQuotaInfoProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
// Generated code places the default arm before the later cases; the order is
// irrelevant to behavior but is preserved verbatim here.
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
// Field 1 (enum type), varint wire type. Unknown enum numbers are kept
// as varint unknown fields rather than silently dropped.
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
type_ = value;
}
break;
}
case 16: {
// Field 2 (uint64 quota).
bitField0_ |= 0x00000002;
quota_ = input.readUInt64();
break;
}
case 24: {
// Field 3 (uint64 consumed).
bitField0_ |= 0x00000004;
consumed_ = input.readUInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
// Always freeze what was parsed, even on error, so the partially parsed
// message attached to the exception is usable.
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
// Reflection plumbing: ties this class to the descriptor and accessor tables
// registered statically in HdfsProtos.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder.class);
}
/**
 * Parser singleton for {@code StorageTypeQuotaInfoProto}; delegates to the
 * stream-parsing constructor. The {@code <StorageTypeQuotaInfoProto>} type
 * arguments were stripped during transcription and are restored here.
 */
public static com.google.protobuf.Parser<StorageTypeQuotaInfoProto> PARSER =
    new com.google.protobuf.AbstractParser<StorageTypeQuotaInfoProto>() {
  public StorageTypeQuotaInfoProto parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return new StorageTypeQuotaInfoProto(input, extensionRegistry);
  }
};
/** Returns the type-specific parser for this message. */
@java.lang.Override
public com.google.protobuf.Parser<StorageTypeQuotaInfoProto> getParserForType() {
  return PARSER;
}
// Presence bits for the three required fields:
// 0x1 = type, 0x2 = quota, 0x4 = consumed.
private int bitField0_;
// required .hadoop.hdfs.StorageTypeProto type = 1;
public static final int TYPE_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto type_;
/**
* required .hadoop.hdfs.StorageTypeProto type = 1;
*/
public boolean hasType() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.StorageTypeProto type = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getType() {
return type_;
}
// required uint64 quota = 2;
public static final int QUOTA_FIELD_NUMBER = 2;
private long quota_;
/**
* required uint64 quota = 2;
*/
public boolean hasQuota() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 quota = 2;
*/
public long getQuota() {
return quota_;
}
// required uint64 consumed = 3;
public static final int CONSUMED_FIELD_NUMBER = 3;
private long consumed_;
/**
* required uint64 consumed = 3;
*/
public boolean hasConsumed() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 consumed = 3;
*/
public long getConsumed() {
return consumed_;
}
// Resets all fields to their proto defaults (enum first value, zero counts);
// called before parsing and by the default-instance static initializer.
private void initFields() {
type_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
quota_ = 0L;
consumed_ = 0L;
}
// Cached initialization check: -1 = not computed, 0 = missing required
// field(s), 1 = all required fields present.
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasType()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasQuota()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasConsumed()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
// Wire serialization: only fields whose presence bit is set are written.
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
// Computes (and memoizes) the size first, as required by CodedOutputStream.
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, type_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, quota_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt64(3, consumed_);
}
getUnknownFields().writeTo(output);
}
// Memoized wire size; -1 until first computed. Safe because the message is
// immutable after construction.
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, type_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, quota_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, consumed_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
// Value equality: same field presence, same field values, same unknown fields.
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto) obj;
boolean result = true;
result = result && (hasType() == other.hasType());
if (hasType()) {
result = result &&
(getType() == other.getType());
}
result = result && (hasQuota() == other.hasQuota());
if (hasQuota()) {
result = result && (getQuota()
== other.getQuota());
}
result = result && (hasConsumed() == other.hasConsumed());
if (hasConsumed()) {
result = result && (getConsumed()
== other.getConsumed());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
// Memoized hash; 0 doubles as the "not computed" sentinel. hashEnum/hashLong
// are inherited hashing helpers.
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasType()) {
hash = (37 * hash) + TYPE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getType());
}
if (hasQuota()) {
hash = (37 * hash) + QUOTA_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getQuota());
}
if (hasConsumed()) {
hash = (37 * hash) + CONSUMED_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getConsumed());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// Static parse entry points for every input form (ByteString, byte[], streams,
// delimited streams), all delegating to the PARSER singleton.
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Builder factory methods; toBuilder() seeds a new builder from this message.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Protobuf type {@code hadoop.hdfs.StorageTypeQuotaInfoProto}
 *
 * Builder for StorageTypeQuotaInfoProto (required fields: type, quota,
 * consumed). The {@code <Builder>} self-type argument on the superclass was
 * stripped during transcription (raw type) and is restored here.
 */
public static final class Builder extends
    com.google.protobuf.GeneratedMessage.Builder<Builder>
   implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder {
  public static final com.google.protobuf.Descriptors.Descriptor
      getDescriptor() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor;
  }
  protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder.class);
  }
  // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.newBuilder()
  private Builder() {
    maybeForceBuilderInitialization();
  }
  private Builder(
      com.google.protobuf.GeneratedMessage.BuilderParent parent) {
    super(parent);
    maybeForceBuilderInitialization();
  }
  private void maybeForceBuilderInitialization() {
    if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
    }
  }
  private static Builder create() {
    return new Builder();
  }
  // Resets every field to its proto default and clears its presence bit.
  public Builder clear() {
    super.clear();
    type_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
    bitField0_ = (bitField0_ & ~0x00000001);
    quota_ = 0L;
    bitField0_ = (bitField0_ & ~0x00000002);
    consumed_ = 0L;
    bitField0_ = (bitField0_ & ~0x00000004);
    return this;
  }
  public Builder clone() {
    return create().mergeFrom(buildPartial());
  }
  public com.google.protobuf.Descriptors.Descriptor
      getDescriptorForType() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor;
  }
  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getDefaultInstanceForType() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.getDefaultInstance();
  }
  // build() enforces the required fields; buildPartial() does not.
  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto build() {
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto result = buildPartial();
    if (!result.isInitialized()) {
      throw newUninitializedMessageException(result);
    }
    return result;
  }
  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto buildPartial() {
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto(this);
    int from_bitField0_ = bitField0_;
    int to_bitField0_ = 0;
    if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
      to_bitField0_ |= 0x00000001;
    }
    result.type_ = type_;
    if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
      to_bitField0_ |= 0x00000002;
    }
    result.quota_ = quota_;
    if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
      to_bitField0_ |= 0x00000004;
    }
    result.consumed_ = consumed_;
    result.bitField0_ = to_bitField0_;
    onBuilt();
    return result;
  }
  public Builder mergeFrom(com.google.protobuf.Message other) {
    if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto) {
      return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto)other);
    } else {
      super.mergeFrom(other);
      return this;
    }
  }
  // Field-wise merge: fields set in `other` overwrite this builder's values.
  public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto other) {
    if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.getDefaultInstance()) return this;
    if (other.hasType()) {
      setType(other.getType());
    }
    if (other.hasQuota()) {
      setQuota(other.getQuota());
    }
    if (other.hasConsumed()) {
      setConsumed(other.getConsumed());
    }
    this.mergeUnknownFields(other.getUnknownFields());
    return this;
  }
  public final boolean isInitialized() {
    if (!hasType()) {
      return false;
    }
    if (!hasQuota()) {
      return false;
    }
    if (!hasConsumed()) {
      return false;
    }
    return true;
  }
  // Stream merge: on parse failure, keeps whatever was successfully parsed
  // before rethrowing.
  public Builder mergeFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parsedMessage = null;
    try {
      parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto) e.getUnfinishedMessage();
      throw e;
    } finally {
      if (parsedMessage != null) {
        mergeFrom(parsedMessage);
      }
    }
    return this;
  }
  // Presence bits: 0x1 = type, 0x2 = quota, 0x4 = consumed.
  private int bitField0_;

  // required .hadoop.hdfs.StorageTypeProto type = 1;
  private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto type_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
  /**
   * required .hadoop.hdfs.StorageTypeProto type = 1;
   */
  public boolean hasType() {
    return ((bitField0_ & 0x00000001) == 0x00000001);
  }
  /**
   * required .hadoop.hdfs.StorageTypeProto type = 1;
   */
  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getType() {
    return type_;
  }
  /**
   * required .hadoop.hdfs.StorageTypeProto type = 1;
   */
  public Builder setType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
    if (value == null) {
      throw new NullPointerException();
    }
    bitField0_ |= 0x00000001;
    type_ = value;
    onChanged();
    return this;
  }
  /**
   * required .hadoop.hdfs.StorageTypeProto type = 1;
   */
  public Builder clearType() {
    bitField0_ = (bitField0_ & ~0x00000001);
    type_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
    onChanged();
    return this;
  }
  // required uint64 quota = 2;
  private long quota_ ;
  /**
   * required uint64 quota = 2;
   */
  public boolean hasQuota() {
    return ((bitField0_ & 0x00000002) == 0x00000002);
  }
  /**
   * required uint64 quota = 2;
   */
  public long getQuota() {
    return quota_;
  }
  /**
   * required uint64 quota = 2;
   */
  public Builder setQuota(long value) {
    bitField0_ |= 0x00000002;
    quota_ = value;
    onChanged();
    return this;
  }
  /**
   * required uint64 quota = 2;
   */
  public Builder clearQuota() {
    bitField0_ = (bitField0_ & ~0x00000002);
    quota_ = 0L;
    onChanged();
    return this;
  }
  // required uint64 consumed = 3;
  private long consumed_ ;
  /**
   * required uint64 consumed = 3;
   */
  public boolean hasConsumed() {
    return ((bitField0_ & 0x00000004) == 0x00000004);
  }
  /**
   * required uint64 consumed = 3;
   */
  public long getConsumed() {
    return consumed_;
  }
  /**
   * required uint64 consumed = 3;
   */
  public Builder setConsumed(long value) {
    bitField0_ |= 0x00000004;
    consumed_ = value;
    onChanged();
    return this;
  }
  /**
   * required uint64 consumed = 3;
   */
  public Builder clearConsumed() {
    bitField0_ = (bitField0_ & ~0x00000004);
    consumed_ = 0L;
    onChanged();
    return this;
  }
  // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageTypeQuotaInfoProto)
}
static {
// Eagerly build the shared immutable default instance used by
// getDefaultInstance().
defaultInstance = new StorageTypeQuotaInfoProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageTypeQuotaInfoProto)
}
/**
 * Read accessor contract for {@code hadoop.hdfs.CorruptFileBlocksProto}:
 * a list of corrupt-file paths plus the iteration cookie for
 * NameNode.listCorruptFileBlocks. The {@code <java.lang.String>} element
 * type on getFilesList (stripped during transcription) is restored.
 */
public interface CorruptFileBlocksProtoOrBuilder
    extends com.google.protobuf.MessageOrBuilder {
  // repeated string files = 1;
  /**
   * repeated string files = 1;
   */
  java.util.List<java.lang.String>
  getFilesList();
  /**
   * repeated string files = 1;
   */
  int getFilesCount();
  /**
   * repeated string files = 1;
   */
  java.lang.String getFiles(int index);
  /**
   * repeated string files = 1;
   */
  com.google.protobuf.ByteString
      getFilesBytes(int index);
  // required string cookie = 2;
  /**
   * required string cookie = 2;
   */
  boolean hasCookie();
  /**
   * required string cookie = 2;
   */
  java.lang.String getCookie();
  /**
   * required string cookie = 2;
   */
  com.google.protobuf.ByteString
      getCookieBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.CorruptFileBlocksProto}
*
*
**
* Contains a list of paths corresponding to corrupt files and a cookie
* used for iterative calls to NameNode.listCorruptFileBlocks.
*
*/
public static final class CorruptFileBlocksProto extends
com.google.protobuf.GeneratedMessage
implements CorruptFileBlocksProtoOrBuilder {
// Use CorruptFileBlocksProto.newBuilder() to construct.
// The Builder<?> wildcard on the parameter was stripped during transcription
// and is restored here.
private CorruptFileBlocksProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
  super(builder);
  this.unknownFields = builder.getUnknownFields();
}
// Default-instance constructor: nothing parsed, empty unknown-field set.
private CorruptFileBlocksProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final CorruptFileBlocksProto defaultInstance;
public static CorruptFileBlocksProto getDefaultInstance() {
  return defaultInstance;
}
public CorruptFileBlocksProto getDefaultInstanceForType() {
  return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
    getUnknownFields() {
  return this.unknownFields;
}
// Stream-parsing constructor used by PARSER. The repeated `files` field is
// collected in a LazyStringArrayList allocated on first element and sealed
// (made unmodifiable) in the finally block.
private CorruptFileBlocksProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
// Field 1 (repeated string files), length-delimited wire type.
// mutable_bitField0_ bit 0x1 tracks that the local list was allocated.
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
files_ = new com.google.protobuf.LazyStringArrayList();
mutable_bitField0_ |= 0x00000001;
}
files_.add(input.readBytes());
break;
}
case 18: {
// Field 2 (required string cookie), stored lazily as a ByteString.
bitField0_ |= 0x00000001;
cookie_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
// Seal the list and unknown fields even if parsing failed part-way.
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
files_ = new com.google.protobuf.UnmodifiableLazyStringList(files_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.Builder.class);
}
/**
 * Parser singleton for {@code CorruptFileBlocksProto}; delegates to the
 * stream-parsing constructor. The {@code <CorruptFileBlocksProto>} type
 * arguments were stripped during transcription and are restored here.
 */
public static com.google.protobuf.Parser<CorruptFileBlocksProto> PARSER =
    new com.google.protobuf.AbstractParser<CorruptFileBlocksProto>() {
  public CorruptFileBlocksProto parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return new CorruptFileBlocksProto(input, extensionRegistry);
  }
};
/** Returns the type-specific parser for this message. */
@java.lang.Override
public com.google.protobuf.Parser<CorruptFileBlocksProto> getParserForType() {
  return PARSER;
}
// Presence bits; only bit 0x1 (cookie) is used — repeated fields track
// presence by list emptiness.
private int bitField0_;
// repeated string files = 1;
public static final int FILES_FIELD_NUMBER = 1;
private com.google.protobuf.LazyStringList files_;
/**
 * repeated string files = 1;
 *
 * Paths of files containing corrupt blocks. The {@code <java.lang.String>}
 * element type on the returned List (stripped during transcription) is
 * restored here.
 */
public java.util.List<java.lang.String>
    getFilesList() {
  return files_;
}
/**
 * repeated string files = 1;
 */
public int getFilesCount() {
  return files_.size();
}
/**
 * repeated string files = 1;
 */
public java.lang.String getFiles(int index) {
  return files_.get(index);
}
/**
 * repeated string files = 1;
 */
public com.google.protobuf.ByteString
    getFilesBytes(int index) {
  return files_.getByteString(index);
}
// required string cookie = 2;
public static final int COOKIE_FIELD_NUMBER = 2;
// Holds either a ByteString (as parsed) or a String (after first decode);
// decoding is cached lazily by getCookie()/getCookieBytes().
private java.lang.Object cookie_;
/**
* required string cookie = 2;
*/
public boolean hasCookie() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string cookie = 2;
*
* Decodes the cookie to a String, caching the result only when the bytes
* are valid UTF-8.
*/
public java.lang.String getCookie() {
java.lang.Object ref = cookie_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
cookie_ = s;
}
return s;
}
}
/**
* required string cookie = 2;
*
* Returns the cookie as bytes, caching the UTF-8 encoding if the field is
* currently held as a String.
*/
public com.google.protobuf.ByteString
getCookieBytes() {
java.lang.Object ref = cookie_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
cookie_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
files_ = com.google.protobuf.LazyStringArrayList.EMPTY;
cookie_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasCookie()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < files_.size(); i++) {
output.writeBytes(1, files_.getByteString(i));
}
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(2, getCookieBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
{
int dataSize = 0;
for (int i = 0; i < files_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeBytesSizeNoTag(files_.getByteString(i));
}
size += dataSize;
size += 1 * getFilesList().size();
}
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getCookieBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto) obj;
boolean result = true;
result = result && getFilesList()
.equals(other.getFilesList());
result = result && (hasCookie() == other.hasCookie());
if (hasCookie()) {
result = result && getCookie()
.equals(other.getCookie());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (getFilesCount() > 0) {
hash = (37 * hash) + FILES_FIELD_NUMBER;
hash = (53 * hash) + getFilesList().hashCode();
}
if (hasCookie()) {
hash = (37 * hash) + COOKIE_FIELD_NUMBER;
hash = (53 * hash) + getCookie().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Builder factory methods.  newBuilder(prototype) seeds a fresh builder with
// a copy of the given message's fields; toBuilder() does the same for this
// instance.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto prototype) {
  return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
    com.google.protobuf.GeneratedMessage.BuilderParent parent) {
  // Parent-aware builder used internally by GeneratedMessage for
  // nested-builder change propagation.
  Builder builder = new Builder(parent);
  return builder;
}
/**
 * Protobuf type {@code hadoop.hdfs.CorruptFileBlocksProto}
 *
 * <pre>
 * Contains a list of paths corresponding to corrupt files and a cookie
 * used for iterative calls to NameNode.listCorruptFileBlocks.
 * </pre>
 */
public static final class Builder extends
    // FIX: the generic parameter <Builder> was lost in extraction; protoc
    // always emits GeneratedMessage.Builder<Builder> here.
    com.google.protobuf.GeneratedMessage.Builder<Builder>
   implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProtoOrBuilder {
  public static final com.google.protobuf.Descriptors.Descriptor
      getDescriptor() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor;
  }

  protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.Builder.class);
  }

  // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.newBuilder()
  private Builder() {
    maybeForceBuilderInitialization();
  }

  private Builder(
      com.google.protobuf.GeneratedMessage.BuilderParent parent) {
    super(parent);
    maybeForceBuilderInitialization();
  }
  private void maybeForceBuilderInitialization() {
    if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
    }
  }
  private static Builder create() {
    return new Builder();
  }

  // Resets both fields and clears their "has" bits in bitField0_.
  public Builder clear() {
    super.clear();
    files_ = com.google.protobuf.LazyStringArrayList.EMPTY;
    bitField0_ = (bitField0_ & ~0x00000001);
    cookie_ = "";
    bitField0_ = (bitField0_ & ~0x00000002);
    return this;
  }

  public Builder clone() {
    return create().mergeFrom(buildPartial());
  }

  public com.google.protobuf.Descriptors.Descriptor
      getDescriptorForType() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor;
  }

  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto getDefaultInstanceForType() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDefaultInstance();
  }

  // build() enforces required fields (cookie); buildPartial() does not.
  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto build() {
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto result = buildPartial();
    if (!result.isInitialized()) {
      throw newUninitializedMessageException(result);
    }
    return result;
  }

  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto buildPartial() {
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto(this);
    int from_bitField0_ = bitField0_;
    int to_bitField0_ = 0;
    if (((bitField0_ & 0x00000001) == 0x00000001)) {
      // Freeze the repeated field; the builder drops ownership of the list.
      files_ = new com.google.protobuf.UnmodifiableLazyStringList(
          files_);
      bitField0_ = (bitField0_ & ~0x00000001);
    }
    result.files_ = files_;
    if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
      to_bitField0_ |= 0x00000001;
    }
    result.cookie_ = cookie_;
    result.bitField0_ = to_bitField0_;
    onBuilt();
    return result;
  }

  public Builder mergeFrom(com.google.protobuf.Message other) {
    if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto) {
      return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto)other);
    } else {
      super.mergeFrom(other);
      return this;
    }
  }

  public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto other) {
    if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDefaultInstance()) return this;
    if (!other.files_.isEmpty()) {
      if (files_.isEmpty()) {
        // Share the other message's (immutable) list until a write occurs.
        files_ = other.files_;
        bitField0_ = (bitField0_ & ~0x00000001);
      } else {
        ensureFilesIsMutable();
        files_.addAll(other.files_);
      }
      onChanged();
    }
    if (other.hasCookie()) {
      bitField0_ |= 0x00000002;
      cookie_ = other.cookie_;
      onChanged();
    }
    this.mergeUnknownFields(other.getUnknownFields());
    return this;
  }

  public final boolean isInitialized() {
    if (!hasCookie()) {
      return false;
    }
    return true;
  }

  public Builder mergeFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parsedMessage = null;
    try {
      parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      // Keep whatever was parsed before the failure, then rethrow.
      parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto) e.getUnfinishedMessage();
      throw e;
    } finally {
      if (parsedMessage != null) {
        mergeFrom(parsedMessage);
      }
    }
    return this;
  }
  private int bitField0_;

  // repeated string files = 1;
  private com.google.protobuf.LazyStringList files_ = com.google.protobuf.LazyStringArrayList.EMPTY;
  private void ensureFilesIsMutable() {
    if (!((bitField0_ & 0x00000001) == 0x00000001)) {
      files_ = new com.google.protobuf.LazyStringArrayList(files_);
      bitField0_ |= 0x00000001;
    }
  }
  /**
   * <code>repeated string files = 1;</code>
   */
  // FIX: restore the element type lost in extraction (protoc emits
  // List<java.lang.String> here).
  public java.util.List<java.lang.String>
      getFilesList() {
    return java.util.Collections.unmodifiableList(files_);
  }
  /**
   * <code>repeated string files = 1;</code>
   */
  public int getFilesCount() {
    return files_.size();
  }
  /**
   * <code>repeated string files = 1;</code>
   */
  public java.lang.String getFiles(int index) {
    return files_.get(index);
  }
  /**
   * <code>repeated string files = 1;</code>
   */
  public com.google.protobuf.ByteString
      getFilesBytes(int index) {
    return files_.getByteString(index);
  }
  /**
   * <code>repeated string files = 1;</code>
   */
  public Builder setFiles(
      int index, java.lang.String value) {
    if (value == null) {
      throw new NullPointerException();
    }
    ensureFilesIsMutable();
    files_.set(index, value);
    onChanged();
    return this;
  }
  /**
   * <code>repeated string files = 1;</code>
   */
  public Builder addFiles(
      java.lang.String value) {
    if (value == null) {
      throw new NullPointerException();
    }
    ensureFilesIsMutable();
    files_.add(value);
    onChanged();
    return this;
  }
  /**
   * <code>repeated string files = 1;</code>
   */
  // FIX: restore the Iterable element type lost in extraction.
  public Builder addAllFiles(
      java.lang.Iterable<java.lang.String> values) {
    ensureFilesIsMutable();
    super.addAll(values, files_);
    onChanged();
    return this;
  }
  /**
   * <code>repeated string files = 1;</code>
   */
  public Builder clearFiles() {
    files_ = com.google.protobuf.LazyStringArrayList.EMPTY;
    bitField0_ = (bitField0_ & ~0x00000001);
    onChanged();
    return this;
  }
  /**
   * <code>repeated string files = 1;</code>
   */
  public Builder addFilesBytes(
      com.google.protobuf.ByteString value) {
    if (value == null) {
      throw new NullPointerException();
    }
    ensureFilesIsMutable();
    files_.add(value);
    onChanged();
    return this;
  }

  // required string cookie = 2;
  // Stored as Object: may hold either a String or a lazily-decoded ByteString.
  private java.lang.Object cookie_ = "";
  /**
   * <code>required string cookie = 2;</code>
   */
  public boolean hasCookie() {
    return ((bitField0_ & 0x00000002) == 0x00000002);
  }
  /**
   * <code>required string cookie = 2;</code>
   */
  public java.lang.String getCookie() {
    java.lang.Object ref = cookie_;
    if (!(ref instanceof java.lang.String)) {
      java.lang.String s = ((com.google.protobuf.ByteString) ref)
          .toStringUtf8();
      cookie_ = s;
      return s;
    } else {
      return (java.lang.String) ref;
    }
  }
  /**
   * <code>required string cookie = 2;</code>
   */
  public com.google.protobuf.ByteString
      getCookieBytes() {
    java.lang.Object ref = cookie_;
    if (ref instanceof String) {
      com.google.protobuf.ByteString b =
          com.google.protobuf.ByteString.copyFromUtf8(
              (java.lang.String) ref);
      cookie_ = b;
      return b;
    } else {
      return (com.google.protobuf.ByteString) ref;
    }
  }
  /**
   * <code>required string cookie = 2;</code>
   */
  public Builder setCookie(
      java.lang.String value) {
    if (value == null) {
      throw new NullPointerException();
    }
    bitField0_ |= 0x00000002;
    cookie_ = value;
    onChanged();
    return this;
  }
  /**
   * <code>required string cookie = 2;</code>
   */
  public Builder clearCookie() {
    bitField0_ = (bitField0_ & ~0x00000002);
    cookie_ = getDefaultInstance().getCookie();
    onChanged();
    return this;
  }
  /**
   * <code>required string cookie = 2;</code>
   */
  public Builder setCookieBytes(
      com.google.protobuf.ByteString value) {
    if (value == null) {
      throw new NullPointerException();
    }
    bitField0_ |= 0x00000002;
    cookie_ = value;
    onChanged();
    return this;
  }

  // @@protoc_insertion_point(builder_scope:hadoop.hdfs.CorruptFileBlocksProto)
}
// Eagerly create the immutable default instance returned by
// getDefaultInstance() and used as the merge no-op sentinel.
static {
  defaultInstance = new CorruptFileBlocksProto(true);
  defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.CorruptFileBlocksProto)
}
// Read-only accessor contract shared by StorageTypesProto and its Builder.
public interface StorageTypesProtoOrBuilder
    extends com.google.protobuf.MessageOrBuilder {

  // repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
  /**
   * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;</code>
   */
  // FIX: restore the element type lost in extraction (protoc emits
  // List<StorageTypeProto> here).
  java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getStorageTypesList();
  /**
   * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;</code>
   */
  int getStorageTypesCount();
  /**
   * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;</code>
   */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index);
}
/**
* Protobuf type {@code hadoop.hdfs.StorageTypesProto}
*
*
**
* A list of storage types.
*
*/
public static final class StorageTypesProto extends
com.google.protobuf.GeneratedMessage
implements StorageTypesProtoOrBuilder {
// Use StorageTypesProto.newBuilder() to construct.
private StorageTypesProto(com.google.protobuf.GeneratedMessage.Builder> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private StorageTypesProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final StorageTypesProto defaultInstance;
public static StorageTypesProto getDefaultInstance() {
return defaultInstance;
}
public StorageTypesProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private StorageTypesProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
storageTypes_ = new java.util.ArrayList();
mutable_bitField0_ |= 0x00000001;
}
storageTypes_.add(value);
}
break;
}
case 10: {
int length = input.readRawVarint32();
int oldLimit = input.pushLimit(length);
while(input.getBytesUntilLimit() > 0) {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
storageTypes_ = new java.util.ArrayList();
mutable_bitField0_ |= 0x00000001;
}
storageTypes_.add(value);
}
}
input.popLimit(oldLimit);
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
storageTypes_ = java.util.Collections.unmodifiableList(storageTypes_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder.class);
}
public static com.google.protobuf.Parser PARSER =
new com.google.protobuf.AbstractParser() {
public StorageTypesProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new StorageTypesProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser getParserForType() {
return PARSER;
}
// repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
public static final int STORAGETYPES_FIELD_NUMBER = 1;
private java.util.List storageTypes_;
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
*/
public java.util.List getStorageTypesList() {
return storageTypes_;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
*/
public int getStorageTypesCount() {
return storageTypes_.size();
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) {
return storageTypes_.get(index);
}
private void initFields() {
storageTypes_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < storageTypes_.size(); i++) {
output.writeEnum(1, storageTypes_.get(i).getNumber());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
{
int dataSize = 0;
for (int i = 0; i < storageTypes_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeEnumSizeNoTag(storageTypes_.get(i).getNumber());
}
size += dataSize;
size += 1 * storageTypes_.size();
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto) obj;
boolean result = true;
result = result && getStorageTypesList()
.equals(other.getStorageTypesList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (getStorageTypesCount() > 0) {
hash = (37 * hash) + STORAGETYPES_FIELD_NUMBER;
hash = (53 * hash) + hashEnumList(getStorageTypesList());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.StorageTypesProto}
*
*
**
* A list of storage types.
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
storageTypes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto(this);
int from_bitField0_ = bitField0_;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
storageTypes_ = java.util.Collections.unmodifiableList(storageTypes_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.storageTypes_ = storageTypes_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance()) return this;
if (!other.storageTypes_.isEmpty()) {
if (storageTypes_.isEmpty()) {
storageTypes_ = other.storageTypes_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureStorageTypesIsMutable();
storageTypes_.addAll(other.storageTypes_);
}
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
private java.util.List storageTypes_ =
java.util.Collections.emptyList();
private void ensureStorageTypesIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
storageTypes_ = new java.util.ArrayList(storageTypes_);
bitField0_ |= 0x00000001;
}
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
*/
public java.util.List getStorageTypesList() {
return java.util.Collections.unmodifiableList(storageTypes_);
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
*/
public int getStorageTypesCount() {
return storageTypes_.size();
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) {
return storageTypes_.get(index);
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
*/
public Builder setStorageTypes(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
ensureStorageTypesIsMutable();
storageTypes_.set(index, value);
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
*/
public Builder addStorageTypes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
ensureStorageTypesIsMutable();
storageTypes_.add(value);
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
*/
public Builder addAllStorageTypes(
java.lang.Iterable extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> values) {
ensureStorageTypesIsMutable();
super.addAll(values, storageTypes_);
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;
*/
public Builder clearStorageTypes() {
storageTypes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageTypesProto)
}
static {
defaultInstance = new StorageTypesProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageTypesProto)
}
// Read-only accessor contract shared by BlockStoragePolicyProto and its
// Builder: a numeric policy id, a policy name, and up to three storage-type
// lists (creation policy plus optional creation/replication fallbacks).
public interface BlockStoragePolicyProtoOrBuilder
  extends com.google.protobuf.MessageOrBuilder {

  // required uint32 policyId = 1;
  /**
   * required uint32 policyId = 1;
   */
  boolean hasPolicyId();
  /**
   * required uint32 policyId = 1;
   */
  int getPolicyId();

  // required string name = 2;
  /**
   * required string name = 2;
   */
  boolean hasName();
  /**
   * required string name = 2;
   */
  java.lang.String getName();
  /**
   * required string name = 2;
   */
  com.google.protobuf.ByteString
      getNameBytes();

  // required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
  /**
   * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
   *
   *
   * a list of storage types for storing the block replicas when creating a
   * block.
   *
   */
  boolean hasCreationPolicy();
  /**
   * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
   *
   *
   * a list of storage types for storing the block replicas when creating a
   * block.
   *
   */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationPolicy();
  /**
   * required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
   *
   *
   * a list of storage types for storing the block replicas when creating a
   * block.
   *
   */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationPolicyOrBuilder();

  // optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
  /**
   * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
   *
   *
   * A list of storage types for creation fallback storage.
   *
   */
  boolean hasCreationFallbackPolicy();
  /**
   * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
   *
   *
   * A list of storage types for creation fallback storage.
   *
   */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationFallbackPolicy();
  /**
   * optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
   *
   *
   * A list of storage types for creation fallback storage.
   *
   */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationFallbackPolicyOrBuilder();

  // optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
  /**
   * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
   */
  boolean hasReplicationFallbackPolicy();
  /**
   * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
   */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getReplicationFallbackPolicy();
  /**
   * optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
   */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getReplicationFallbackPolicyOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.BlockStoragePolicyProto}
*
*
**
* Block replica storage policy.
*
*/
public static final class BlockStoragePolicyProto extends
com.google.protobuf.GeneratedMessage
implements BlockStoragePolicyProtoOrBuilder {
// Use BlockStoragePolicyProto.newBuilder() to construct.
// FIX: wildcard builder parameter <?> was lost in extraction; protoc emits
// GeneratedMessage.Builder<?> here.
private BlockStoragePolicyProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
  super(builder);
  this.unknownFields = builder.getUnknownFields();
}
// Constructor used only for the singleton default instance; all fields stay
// at their type defaults.
private BlockStoragePolicyProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final BlockStoragePolicyProto defaultInstance;
public static BlockStoragePolicyProto getDefaultInstance() {
  return defaultInstance;
}
public BlockStoragePolicyProto getDefaultInstanceForType() {
  return defaultInstance;
}
// Fields that arrived on the wire but are not defined in this schema version.
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
    getUnknownFields() {
  return this.unknownFields;
}
// Wire-format parsing constructor.  Reads tag/value pairs until EOF (tag 0);
// unrecognized tags are preserved in unknownFields.  For each message-typed
// field (tags 26/34/42) a repeated occurrence is merged into the previously
// parsed value via a temporary sub-builder, per protobuf merge semantics.
private BlockStoragePolicyProto(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  initFields();
  int mutable_bitField0_ = 0;
  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
      com.google.protobuf.UnknownFieldSet.newBuilder();
  try {
    boolean done = false;
    while (!done) {
      int tag = input.readTag();
      switch (tag) {
        case 0:
          done = true;
          break;
        default: {
          if (!parseUnknownField(input, unknownFields,
                                 extensionRegistry, tag)) {
            done = true;
          }
          break;
        }
        case 8: {
          // required uint32 policyId = 1
          bitField0_ |= 0x00000001;
          policyId_ = input.readUInt32();
          break;
        }
        case 18: {
          // required string name = 2 (kept as ByteString; decoded lazily)
          bitField0_ |= 0x00000002;
          name_ = input.readBytes();
          break;
        }
        case 26: {
          // required StorageTypesProto creationPolicy = 3
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder subBuilder = null;
          if (((bitField0_ & 0x00000004) == 0x00000004)) {
            subBuilder = creationPolicy_.toBuilder();
          }
          creationPolicy_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.PARSER, extensionRegistry);
          if (subBuilder != null) {
            subBuilder.mergeFrom(creationPolicy_);
            creationPolicy_ = subBuilder.buildPartial();
          }
          bitField0_ |= 0x00000004;
          break;
        }
        case 34: {
          // optional StorageTypesProto creationFallbackPolicy = 4
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder subBuilder = null;
          if (((bitField0_ & 0x00000008) == 0x00000008)) {
            subBuilder = creationFallbackPolicy_.toBuilder();
          }
          creationFallbackPolicy_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.PARSER, extensionRegistry);
          if (subBuilder != null) {
            subBuilder.mergeFrom(creationFallbackPolicy_);
            creationFallbackPolicy_ = subBuilder.buildPartial();
          }
          bitField0_ |= 0x00000008;
          break;
        }
        case 42: {
          // optional StorageTypesProto replicationFallbackPolicy = 5
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder subBuilder = null;
          if (((bitField0_ & 0x00000010) == 0x00000010)) {
            subBuilder = replicationFallbackPolicy_.toBuilder();
          }
          replicationFallbackPolicy_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.PARSER, extensionRegistry);
          if (subBuilder != null) {
            subBuilder.mergeFrom(replicationFallbackPolicy_);
            replicationFallbackPolicy_ = subBuilder.buildPartial();
          }
          bitField0_ |= 0x00000010;
          break;
        }
      }
    }
  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
    throw e.setUnfinishedMessage(this);
  } catch (java.io.IOException e) {
    throw new com.google.protobuf.InvalidProtocolBufferException(
        e.getMessage()).setUnfinishedMessage(this);
  } finally {
    this.unknownFields = unknownFields.build();
    makeExtensionsImmutable();
  }
}
// Reflection support: descriptor and field-accessor table for this message
// type, resolved from the file-level descriptor tables in HdfsProtos.
public static final com.google.protobuf.Descriptors.Descriptor
    getDescriptor() {
  return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
    internalGetFieldAccessorTable() {
  return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_fieldAccessorTable
      .ensureFieldAccessorsInitialized(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.Builder.class);
}
public static com.google.protobuf.Parser PARSER =
new com.google.protobuf.AbstractParser() {
public BlockStoragePolicyProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new BlockStoragePolicyProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser getParserForType() {
return PARSER;
}
// Presence bitmask: bit 0 = policyId, bit 1 = name, bit 2 = creationPolicy,
// bit 3 = creationFallbackPolicy, bit 4 = replicationFallbackPolicy.
private int bitField0_;
// required uint32 policyId = 1;
public static final int POLICYID_FIELD_NUMBER = 1;
private int policyId_;
/**
* required uint32 policyId = 1;
*/
public boolean hasPolicyId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint32 policyId = 1;
*/
public int getPolicyId() {
return policyId_;
}
// required string name = 2;
public static final int NAME_FIELD_NUMBER = 2;
// Holds either a java.lang.String or a ByteString; decoded lazily on first access.
private java.lang.Object name_;
/**
* required string name = 2;
*/
public boolean hasName() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string name = 2;
*/
public java.lang.String getName() {
java.lang.Object ref = name_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
// Cache the decoded String only when the bytes are valid UTF-8, so the
// original bytes are preserved for re-serialization of malformed input.
if (bs.isValidUtf8()) {
name_ = s;
}
return s;
}
}
/**
* required string name = 2;
*/
public com.google.protobuf.ByteString
getNameBytes() {
java.lang.Object ref = name_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
// Cache the encoded form so subsequent byte accesses skip re-encoding.
name_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
public static final int CREATIONPOLICY_FIELD_NUMBER = 3;
// Never null after initFields(); defaults to StorageTypesProto.getDefaultInstance().
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto creationPolicy_;
/**
* required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
*
*
* a list of storage types for storing the block replicas when creating a
* block.
*
*/
public boolean hasCreationPolicy() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
*
*
* a list of storage types for storing the block replicas when creating a
* block.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationPolicy() {
return creationPolicy_;
}
/**
* required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
*
*
* a list of storage types for storing the block replicas when creating a
* block.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationPolicyOrBuilder() {
return creationPolicy_;
}
// optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
public static final int CREATIONFALLBACKPOLICY_FIELD_NUMBER = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto creationFallbackPolicy_;
/**
* optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
*
*
* A list of storage types for creation fallback storage.
*
*/
public boolean hasCreationFallbackPolicy() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
*
*
* A list of storage types for creation fallback storage.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationFallbackPolicy() {
return creationFallbackPolicy_;
}
/**
* optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
*
*
* A list of storage types for creation fallback storage.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationFallbackPolicyOrBuilder() {
return creationFallbackPolicy_;
}
// optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
public static final int REPLICATIONFALLBACKPOLICY_FIELD_NUMBER = 5;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto replicationFallbackPolicy_;
/**
* optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
*/
public boolean hasReplicationFallbackPolicy() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getReplicationFallbackPolicy() {
return replicationFallbackPolicy_;
}
/**
* optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getReplicationFallbackPolicyOrBuilder() {
return replicationFallbackPolicy_;
}
// Resets every field to its proto default so accessors never return null.
private void initFields() {
policyId_ = 0;
name_ = "";
creationPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance();
creationFallbackPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance();
replicationFallbackPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance();
}
// Memoized tri-state: -1 = not yet computed, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
// A message is initialized when all proto2 "required" fields are present:
// policyId, name, and creationPolicy.
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasPolicyId()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasName()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasCreationPolicy()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
// Serializes only the fields whose presence bit is set, in field-number order,
// followed by any unknown fields carried over from parsing.
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
// Populates memoized sizes of nested messages before writing length-delimited fields.
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt32(1, policyId_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getNameBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeMessage(3, creationPolicy_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeMessage(4, creationFallbackPolicy_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeMessage(5, replicationFallbackPolicy_);
}
getUnknownFields().writeTo(output);
}
// Cached wire size; -1 means not yet computed (safe: messages are immutable).
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(1, policyId_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getNameBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, creationPolicy_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(4, creationFallbackPolicy_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(5, replicationFallbackPolicy_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
// Java serialization is redirected through the superclass proxy.
return super.writeReplace();
}
// Field-by-field equality: presence flags must match, and set fields must be
// equal; unknown fields are compared too.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto) obj;
boolean result = true;
result = result && (hasPolicyId() == other.hasPolicyId());
if (hasPolicyId()) {
result = result && (getPolicyId()
== other.getPolicyId());
}
result = result && (hasName() == other.hasName());
if (hasName()) {
result = result && getName()
.equals(other.getName());
}
result = result && (hasCreationPolicy() == other.hasCreationPolicy());
if (hasCreationPolicy()) {
result = result && getCreationPolicy()
.equals(other.getCreationPolicy());
}
result = result && (hasCreationFallbackPolicy() == other.hasCreationFallbackPolicy());
if (hasCreationFallbackPolicy()) {
result = result && getCreationFallbackPolicy()
.equals(other.getCreationFallbackPolicy());
}
result = result && (hasReplicationFallbackPolicy() == other.hasReplicationFallbackPolicy());
if (hasReplicationFallbackPolicy()) {
result = result && getReplicationFallbackPolicy()
.equals(other.getReplicationFallbackPolicy());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
// Cached hash; 0 means "not yet computed" (a real hash of 0 is simply recomputed).
private int memoizedHashCode = 0;
// Hash mixes only the fields that are present, tagged by field number, so it
// stays consistent with equals() above.
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasPolicyId()) {
hash = (37 * hash) + POLICYID_FIELD_NUMBER;
hash = (53 * hash) + getPolicyId();
}
if (hasName()) {
hash = (37 * hash) + NAME_FIELD_NUMBER;
hash = (53 * hash) + getName().hashCode();
}
if (hasCreationPolicy()) {
hash = (37 * hash) + CREATIONPOLICY_FIELD_NUMBER;
hash = (53 * hash) + getCreationPolicy().hashCode();
}
if (hasCreationFallbackPolicy()) {
hash = (37 * hash) + CREATIONFALLBACKPOLICY_FIELD_NUMBER;
hash = (53 * hash) + getCreationFallbackPolicy().hashCode();
}
if (hasReplicationFallbackPolicy()) {
hash = (37 * hash) + REPLICATIONFALLBACKPOLICY_FIELD_NUMBER;
hash = (53 * hash) + getReplicationFallbackPolicy().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// Standard generated parse entry points; all delegate to the shared PARSER.
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// "Delimited" variants read a leading varint length before the message body.
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Builder factory helpers: newBuilder(prototype) seeds a builder with an
// existing message; toBuilder() is the instance-side equivalent.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.BlockStoragePolicyProto}
*
*
**
* Block replica storage policy.
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProtoOrBuilder {
// Builder-side descriptor plumbing; mirrors the message-side equivalents.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
// Eagerly creates nested-message field builders only when the runtime is
// configured to always use field builders (descriptor-based runtime).
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getCreationPolicyFieldBuilder();
getCreationFallbackPolicyFieldBuilder();
getReplicationFallbackPolicyFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
// Resets every field to its default and clears all presence bits.
public Builder clear() {
super.clear();
policyId_ = 0;
bitField0_ = (bitField0_ & ~0x00000001);
name_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
if (creationPolicyBuilder_ == null) {
creationPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance();
} else {
creationPolicyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
if (creationFallbackPolicyBuilder_ == null) {
creationFallbackPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance();
} else {
creationFallbackPolicyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
if (replicationFallbackPolicyBuilder_ == null) {
replicationFallbackPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance();
} else {
replicationFallbackPolicyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
public Builder clone() {
// Deep-ish clone via a fresh builder merged from the current partial state.
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.getDefaultInstance();
}
// Like buildPartial() but rejects messages missing required fields.
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
// Copies builder state into a new message, translating builder presence bits
// into the message's bitField0_; does NOT check required fields.
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.policyId_ = policyId_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.name_ = name_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
// Nested-message fields come from the sub-builder when one exists.
if (creationPolicyBuilder_ == null) {
result.creationPolicy_ = creationPolicy_;
} else {
result.creationPolicy_ = creationPolicyBuilder_.build();
}
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
if (creationFallbackPolicyBuilder_ == null) {
result.creationFallbackPolicy_ = creationFallbackPolicy_;
} else {
result.creationFallbackPolicy_ = creationFallbackPolicyBuilder_.build();
}
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
if (replicationFallbackPolicyBuilder_ == null) {
result.replicationFallbackPolicy_ = replicationFallbackPolicy_;
} else {
result.replicationFallbackPolicy_ = replicationFallbackPolicyBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
// Generic merge entry point; dispatches to the typed overload when possible.
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
// Field-wise merge: only fields present in 'other' overwrite/merge into this
// builder; message fields merge recursively via merge*Policy().
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.getDefaultInstance()) return this;
if (other.hasPolicyId()) {
setPolicyId(other.getPolicyId());
}
if (other.hasName()) {
bitField0_ |= 0x00000002;
// Shares other's String/ByteString reference directly; safe because both are immutable.
name_ = other.name_;
onChanged();
}
if (other.hasCreationPolicy()) {
mergeCreationPolicy(other.getCreationPolicy());
}
if (other.hasCreationFallbackPolicy()) {
mergeCreationFallbackPolicy(other.getCreationFallbackPolicy());
}
if (other.hasReplicationFallbackPolicy()) {
mergeReplicationFallbackPolicy(other.getReplicationFallbackPolicy());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
// Builder-side required-field check; unlike the message version it is not memoized.
public final boolean isInitialized() {
if (!hasPolicyId()) {
return false;
}
if (!hasName()) {
return false;
}
if (!hasCreationPolicy()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
// Keep whatever was parsed before the failure, then rethrow.
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// Builder-side presence bitmask; same bit layout as the message's bitField0_.
private int bitField0_;
// required uint32 policyId = 1;
private int policyId_ ;
/**
* required uint32 policyId = 1;
*/
public boolean hasPolicyId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint32 policyId = 1;
*/
public int getPolicyId() {
return policyId_;
}
/**
* required uint32 policyId = 1;
*/
public Builder setPolicyId(int value) {
bitField0_ |= 0x00000001;
policyId_ = value;
onChanged();
return this;
}
/**
* required uint32 policyId = 1;
*/
public Builder clearPolicyId() {
bitField0_ = (bitField0_ & ~0x00000001);
policyId_ = 0;
onChanged();
return this;
}
// required string name = 2;
// Holds either a java.lang.String or a ByteString, decoded on demand.
private java.lang.Object name_ = "";
/**
* required string name = 2;
*/
public boolean hasName() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string name = 2;
*/
public java.lang.String getName() {
java.lang.Object ref = name_;
if (!(ref instanceof java.lang.String)) {
// Unlike the message-side getName(), the builder caches the decoded
// String unconditionally (no UTF-8 validity check).
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
name_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string name = 2;
*/
public com.google.protobuf.ByteString
getNameBytes() {
java.lang.Object ref = name_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
name_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string name = 2;
*/
public Builder setName(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
name_ = value;
onChanged();
return this;
}
/**
* required string name = 2;
*/
public Builder clearName() {
bitField0_ = (bitField0_ & ~0x00000002);
name_ = getDefaultInstance().getName();
onChanged();
return this;
}
/**
* required string name = 2;
*/
public Builder setNameBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
name_ = value;
onChanged();
return this;
}
// required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
// Field value is held directly until a SingleFieldBuilder is created; after
// that, creationPolicyBuilder_ owns the state and creationPolicy_ is null.
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto creationPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> creationPolicyBuilder_;
/**
* required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
*
*
* a list of storage types for storing the block replicas when creating a
* block.
*
*/
public boolean hasCreationPolicy() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
*
*
* a list of storage types for storing the block replicas when creating a
* block.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationPolicy() {
if (creationPolicyBuilder_ == null) {
return creationPolicy_;
} else {
return creationPolicyBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
*
*
* a list of storage types for storing the block replicas when creating a
* block.
*
*/
public Builder setCreationPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) {
if (creationPolicyBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
creationPolicy_ = value;
onChanged();
} else {
creationPolicyBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
*
*
* a list of storage types for storing the block replicas when creating a
* block.
*
*/
public Builder setCreationPolicy(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder builderForValue) {
if (creationPolicyBuilder_ == null) {
creationPolicy_ = builderForValue.build();
onChanged();
} else {
creationPolicyBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
return this;
}
/**
* required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
*
*
* a list of storage types for storing the block replicas when creating a
* block.
*
*/
public Builder mergeCreationPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) {
if (creationPolicyBuilder_ == null) {
// Merge field-by-field only when a non-default value is already present;
// otherwise simply adopt the incoming message.
if (((bitField0_ & 0x00000004) == 0x00000004) &&
creationPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance()) {
creationPolicy_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.newBuilder(creationPolicy_).mergeFrom(value).buildPartial();
} else {
creationPolicy_ = value;
}
onChanged();
} else {
creationPolicyBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
*
*
* a list of storage types for storing the block replicas when creating a
* block.
*
*/
public Builder clearCreationPolicy() {
if (creationPolicyBuilder_ == null) {
creationPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance();
onChanged();
} else {
creationPolicyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
/**
* required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
*
*
* a list of storage types for storing the block replicas when creating a
* block.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder getCreationPolicyBuilder() {
// Marks the field present: obtaining the sub-builder implies intent to set it.
bitField0_ |= 0x00000004;
onChanged();
return getCreationPolicyFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
*
*
* a list of storage types for storing the block replicas when creating a
* block.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationPolicyOrBuilder() {
if (creationPolicyBuilder_ != null) {
return creationPolicyBuilder_.getMessageOrBuilder();
} else {
return creationPolicy_;
}
}
/**
* required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;
*
*
* a list of storage types for storing the block replicas when creating a
* block.
*
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>
getCreationPolicyFieldBuilder() {
// Lazily created; once built it takes ownership of creationPolicy_.
if (creationPolicyBuilder_ == null) {
creationPolicyBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>(
creationPolicy_,
getParentForChildren(),
isClean());
creationPolicy_ = null;
}
return creationPolicyBuilder_;
}
// optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
// Same direct-value vs. SingleFieldBuilder ownership pattern as creationPolicy_.
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto creationFallbackPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> creationFallbackPolicyBuilder_;
/**
* optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
*
*
* A list of storage types for creation fallback storage.
*
*/
public boolean hasCreationFallbackPolicy() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
*
*
* A list of storage types for creation fallback storage.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationFallbackPolicy() {
if (creationFallbackPolicyBuilder_ == null) {
return creationFallbackPolicy_;
} else {
return creationFallbackPolicyBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
*
*
* A list of storage types for creation fallback storage.
*
*/
public Builder setCreationFallbackPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) {
if (creationFallbackPolicyBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
creationFallbackPolicy_ = value;
onChanged();
} else {
creationFallbackPolicyBuilder_.setMessage(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
*
*
* A list of storage types for creation fallback storage.
*
*/
public Builder setCreationFallbackPolicy(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder builderForValue) {
if (creationFallbackPolicyBuilder_ == null) {
creationFallbackPolicy_ = builderForValue.build();
onChanged();
} else {
creationFallbackPolicyBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
*
*
* A list of storage types for creation fallback storage.
*
*/
public Builder mergeCreationFallbackPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) {
if (creationFallbackPolicyBuilder_ == null) {
// Merge into an existing non-default value; otherwise adopt the incoming message.
if (((bitField0_ & 0x00000008) == 0x00000008) &&
creationFallbackPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance()) {
creationFallbackPolicy_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.newBuilder(creationFallbackPolicy_).mergeFrom(value).buildPartial();
} else {
creationFallbackPolicy_ = value;
}
onChanged();
} else {
creationFallbackPolicyBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
*
*
* A list of storage types for creation fallback storage.
*
*/
public Builder clearCreationFallbackPolicy() {
if (creationFallbackPolicyBuilder_ == null) {
creationFallbackPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance();
onChanged();
} else {
creationFallbackPolicyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
/**
* optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
*
*
* A list of storage types for creation fallback storage.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder getCreationFallbackPolicyBuilder() {
bitField0_ |= 0x00000008;
onChanged();
return getCreationFallbackPolicyFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
*
*
* A list of storage types for creation fallback storage.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationFallbackPolicyOrBuilder() {
if (creationFallbackPolicyBuilder_ != null) {
return creationFallbackPolicyBuilder_.getMessageOrBuilder();
} else {
return creationFallbackPolicy_;
}
}
/**
* optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;
*
*
* A list of storage types for creation fallback storage.
*
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>
getCreationFallbackPolicyFieldBuilder() {
// Lazily created; once built it takes ownership of creationFallbackPolicy_.
if (creationFallbackPolicyBuilder_ == null) {
creationFallbackPolicyBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>(
creationFallbackPolicy_,
getParentForChildren(),
isClean());
creationFallbackPolicy_ = null;
}
return creationFallbackPolicyBuilder_;
}
// optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
// Same direct-value vs. SingleFieldBuilder ownership pattern as creationPolicy_.
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto replicationFallbackPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> replicationFallbackPolicyBuilder_;
/**
* optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
*/
public boolean hasReplicationFallbackPolicy() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getReplicationFallbackPolicy() {
if (replicationFallbackPolicyBuilder_ == null) {
return replicationFallbackPolicy_;
} else {
return replicationFallbackPolicyBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
*/
public Builder setReplicationFallbackPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) {
if (replicationFallbackPolicyBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
replicationFallbackPolicy_ = value;
onChanged();
} else {
replicationFallbackPolicyBuilder_.setMessage(value);
}
bitField0_ |= 0x00000010;
return this;
}
/**
* optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
*/
public Builder setReplicationFallbackPolicy(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder builderForValue) {
if (replicationFallbackPolicyBuilder_ == null) {
replicationFallbackPolicy_ = builderForValue.build();
onChanged();
} else {
replicationFallbackPolicyBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000010;
return this;
}
/**
* optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
*/
public Builder mergeReplicationFallbackPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) {
if (replicationFallbackPolicyBuilder_ == null) {
if (((bitField0_ & 0x00000010) == 0x00000010) &&
replicationFallbackPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance()) {
replicationFallbackPolicy_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.newBuilder(replicationFallbackPolicy_).mergeFrom(value).buildPartial();
} else {
replicationFallbackPolicy_ = value;
}
onChanged();
} else {
replicationFallbackPolicyBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000010;
return this;
}
/**
* optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
*/
public Builder clearReplicationFallbackPolicy() {
if (replicationFallbackPolicyBuilder_ == null) {
replicationFallbackPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance();
onChanged();
} else {
replicationFallbackPolicyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
/**
* optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder getReplicationFallbackPolicyBuilder() {
bitField0_ |= 0x00000010;
onChanged();
return getReplicationFallbackPolicyFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getReplicationFallbackPolicyOrBuilder() {
if (replicationFallbackPolicyBuilder_ != null) {
return replicationFallbackPolicyBuilder_.getMessageOrBuilder();
} else {
return replicationFallbackPolicy_;
}
}
/**
* optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>
getReplicationFallbackPolicyFieldBuilder() {
if (replicationFallbackPolicyBuilder_ == null) {
replicationFallbackPolicyBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>(
replicationFallbackPolicy_,
getParentForChildren(),
isClean());
replicationFallbackPolicy_ = null;
}
return replicationFallbackPolicyBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlockStoragePolicyProto)
}
// Eagerly build the singleton default instance for BlockStoragePolicyProto.
static {
defaultInstance = new BlockStoragePolicyProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.BlockStoragePolicyProto)
}
public interface LocatedBlockProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.ExtendedBlockProto b = 1;
/**
* required .hadoop.hdfs.ExtendedBlockProto b = 1;
*/
boolean hasB();
/**
* required .hadoop.hdfs.ExtendedBlockProto b = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB();
/**
* required .hadoop.hdfs.ExtendedBlockProto b = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder();
// required uint64 offset = 2;
/**
* required uint64 offset = 2;
*
*
* offset of first byte of block in the file
*
*/
boolean hasOffset();
/**
* required uint64 offset = 2;
*
*
* offset of first byte of block in the file
*
*/
long getOffset();
// repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
java.util.List
getLocsList();
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getLocs(int index);
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
int getLocsCount();
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
java.util.List extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getLocsOrBuilderList();
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getLocsOrBuilder(
int index);
// required bool corrupt = 4;
/**
* required bool corrupt = 4;
*
*
* true if all replicas of a block are corrupt, else false
*
*/
boolean hasCorrupt();
/**
* required bool corrupt = 4;
*
*
* true if all replicas of a block are corrupt, else false
*
*/
boolean getCorrupt();
// required .hadoop.common.TokenProto blockToken = 5;
/**
* required .hadoop.common.TokenProto blockToken = 5;
*/
boolean hasBlockToken();
/**
* required .hadoop.common.TokenProto blockToken = 5;
*/
org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockToken();
/**
* required .hadoop.common.TokenProto blockToken = 5;
*/
org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokenOrBuilder();
// repeated bool isCached = 6 [packed = true];
/**
* repeated bool isCached = 6 [packed = true];
*
*
* if a location in locs is cached
*
*/
java.util.List getIsCachedList();
/**
* repeated bool isCached = 6 [packed = true];
*
*
* if a location in locs is cached
*
*/
int getIsCachedCount();
/**
* repeated bool isCached = 6 [packed = true];
*
*
* if a location in locs is cached
*
*/
boolean getIsCached(int index);
// repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
*/
java.util.List getStorageTypesList();
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
*/
int getStorageTypesCount();
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index);
// repeated string storageIDs = 8;
/**
* repeated string storageIDs = 8;
*/
java.util.List
getStorageIDsList();
/**
* repeated string storageIDs = 8;
*/
int getStorageIDsCount();
/**
* repeated string storageIDs = 8;
*/
java.lang.String getStorageIDs(int index);
/**
* repeated string storageIDs = 8;
*/
com.google.protobuf.ByteString
getStorageIDsBytes(int index);
// optional bytes blockIndices = 9;
/**
* optional bytes blockIndices = 9;
*
*
* striped block related fields
*
*/
boolean hasBlockIndices();
/**
* optional bytes blockIndices = 9;
*
*
* striped block related fields
*
*/
com.google.protobuf.ByteString getBlockIndices();
// repeated .hadoop.common.TokenProto blockTokens = 10;
/**
* repeated .hadoop.common.TokenProto blockTokens = 10;
*
*
* each internal block has a block token
*
*/
java.util.List
getBlockTokensList();
/**
* repeated .hadoop.common.TokenProto blockTokens = 10;
*
*
* each internal block has a block token
*
*/
org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockTokens(int index);
/**
* repeated .hadoop.common.TokenProto blockTokens = 10;
*
*
* each internal block has a block token
*
*/
int getBlockTokensCount();
/**
* repeated .hadoop.common.TokenProto blockTokens = 10;
*
*
* each internal block has a block token
*
*/
java.util.List extends org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>
getBlockTokensOrBuilderList();
/**
* repeated .hadoop.common.TokenProto blockTokens = 10;
*
*
* each internal block has a block token
*
*/
org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokensOrBuilder(
int index);
}
/**
* Protobuf type {@code hadoop.hdfs.LocatedBlockProto}
*
*
**
* A LocatedBlock gives information about a block and its location.
*
*/
public static final class LocatedBlockProto extends
com.google.protobuf.GeneratedMessage
implements LocatedBlockProtoOrBuilder {
// Use LocatedBlockProto.newBuilder() to construct.
// NOTE(review): the wildcard type parameter on Builder was stripped by the
// HTML extraction ("Builder> builder"); restored to the canonical protoc form.
private LocatedBlockProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
  super(builder);
  // Capture unknown fields gathered while the builder was populated.
  this.unknownFields = builder.getUnknownFields();
}
// Lightweight constructor used only for the singleton default instance;
// field initialization is done separately by the static initializer.
private LocatedBlockProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final LocatedBlockProto defaultInstance;
// Singleton immutable instance with every field at its default value.
public static LocatedBlockProto getDefaultInstance() {
return defaultInstance;
}
public LocatedBlockProto getDefaultInstanceForType() {
return defaultInstance;
}
// Fields present on the wire but not defined in this message's schema.
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
// Wire-format constructor: reads fields tag-by-tag from the stream.
// Note the 'default' arm precedes the field cases; this is legal Java and
// only matters for tags not matched by any case label.
private LocatedBlockProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
// Tag 0 marks end of input.
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
// Field 1 (b), length-delimited message; merged into any prior value.
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = b_.toBuilder();
}
b_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(b_);
b_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 16: {
// Field 2 (offset), varint.
bitField0_ |= 0x00000002;
offset_ = input.readUInt64();
break;
}
case 26: {
// Field 3 (locs), repeated message; the list is created lazily.
if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
locs_ = new java.util.ArrayList();
mutable_bitField0_ |= 0x00000004;
}
locs_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.PARSER, extensionRegistry));
break;
}
case 32: {
// Field 4 (corrupt), varint bool.
bitField0_ |= 0x00000004;
corrupt_ = input.readBool();
break;
}
case 42: {
// Field 5 (blockToken), length-delimited message.
org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000008) == 0x00000008)) {
subBuilder = blockToken_.toBuilder();
}
blockToken_ = input.readMessage(org.apache.hadoop.security.proto.SecurityProtos.TokenProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(blockToken_);
blockToken_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000008;
break;
}
case 48: {
// Field 6 (isCached), unpacked bool element.
if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
isCached_ = new java.util.ArrayList();
mutable_bitField0_ |= 0x00000020;
}
isCached_.add(input.readBool());
break;
}
case 50: {
// Field 6 (isCached), packed encoding: bounded by a pushed limit.
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
if (!((mutable_bitField0_ & 0x00000020) == 0x00000020) && input.getBytesUntilLimit() > 0) {
isCached_ = new java.util.ArrayList();
mutable_bitField0_ |= 0x00000020;
}
while (input.getBytesUntilLimit() > 0) {
isCached_.add(input.readBool());
}
input.popLimit(limit);
break;
}
case 56: {
// Field 7 (storageTypes), unpacked enum; unrecognized numeric values
// are preserved in unknownFields rather than dropped.
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(7, rawValue);
} else {
if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
storageTypes_ = new java.util.ArrayList();
mutable_bitField0_ |= 0x00000040;
}
storageTypes_.add(value);
}
break;
}
case 58: {
// Field 7 (storageTypes), packed enum encoding.
int length = input.readRawVarint32();
int oldLimit = input.pushLimit(length);
while(input.getBytesUntilLimit() > 0) {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(7, rawValue);
} else {
if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
storageTypes_ = new java.util.ArrayList();
mutable_bitField0_ |= 0x00000040;
}
storageTypes_.add(value);
}
}
input.popLimit(oldLimit);
break;
}
case 66: {
// Field 8 (storageIDs), repeated string (kept lazily as bytes).
if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) {
storageIDs_ = new com.google.protobuf.LazyStringArrayList();
mutable_bitField0_ |= 0x00000080;
}
storageIDs_.add(input.readBytes());
break;
}
case 74: {
// Field 9 (blockIndices), bytes.
bitField0_ |= 0x00000010;
blockIndices_ = input.readBytes();
break;
}
case 82: {
// Field 10 (blockTokens), repeated message.
if (!((mutable_bitField0_ & 0x00000200) == 0x00000200)) {
blockTokens_ = new java.util.ArrayList();
mutable_bitField0_ |= 0x00000200;
}
blockTokens_.add(input.readMessage(org.apache.hadoop.security.proto.SecurityProtos.TokenProto.PARSER, extensionRegistry));
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
// Seal the lazily-built repeated-field lists whether or not parsing
// completed normally, so the partially-parsed message is still immutable.
if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
locs_ = java.util.Collections.unmodifiableList(locs_);
}
if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
isCached_ = java.util.Collections.unmodifiableList(isCached_);
}
if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
storageTypes_ = java.util.Collections.unmodifiableList(storageTypes_);
}
if (((mutable_bitField0_ & 0x00000080) == 0x00000080)) {
storageIDs_ = new com.google.protobuf.UnmodifiableLazyStringList(storageIDs_);
}
if (((mutable_bitField0_ & 0x00000200) == 0x00000200)) {
blockTokens_ = java.util.Collections.unmodifiableList(blockTokens_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
// Reflection support: descriptor and field-accessor table generated for
// hadoop.hdfs.LocatedBlockProto.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder.class);
}
/**
 * Parser used by parseFrom()/parseDelimitedFrom() entry points.
 * NOTE(review): the {@code <LocatedBlockProto>} type arguments were stripped
 * by the HTML extraction; restored to the canonical protoc output.
 */
public static com.google.protobuf.Parser<LocatedBlockProto> PARSER =
    new com.google.protobuf.AbstractParser<LocatedBlockProto>() {
  public LocatedBlockProto parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    // Delegates to the wire-format constructor above.
    return new LocatedBlockProto(input, extensionRegistry);
  }
};
@java.lang.Override
public com.google.protobuf.Parser<LocatedBlockProto> getParserForType() {
  return PARSER;
}
// Bit i of bitField0_ records presence of the i-th optional/required field.
private int bitField0_;
// required .hadoop.hdfs.ExtendedBlockProto b = 1;
public static final int B_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto b_;
/**
 * required .hadoop.hdfs.ExtendedBlockProto b = 1;
 * True when field 1 was present on the wire (bit 0x01).
 */
public boolean hasB() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
 * required .hadoop.hdfs.ExtendedBlockProto b = 1;
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB() {
return b_;
}
/**
 * required .hadoop.hdfs.ExtendedBlockProto b = 1;
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder() {
return b_;
}
// required uint64 offset = 2;
public static final int OFFSET_FIELD_NUMBER = 2;
private long offset_;
/**
 * required uint64 offset = 2;
 *
 * offset of first byte of block in the file
 */
public boolean hasOffset() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
 * required uint64 offset = 2;
 *
 * offset of first byte of block in the file
 */
public long getOffset() {
return offset_;
}
// repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
public static final int LOCS_FIELD_NUMBER = 3;
private java.util.List locs_;
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public java.util.List getLocsList() {
return locs_;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public java.util.List extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getLocsOrBuilderList() {
return locs_;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public int getLocsCount() {
return locs_.size();
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getLocs(int index) {
return locs_.get(index);
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getLocsOrBuilder(
int index) {
return locs_.get(index);
}
// required bool corrupt = 4;
public static final int CORRUPT_FIELD_NUMBER = 4;
private boolean corrupt_;
/**
 * required bool corrupt = 4;
 *
 * true if all replicas of a block are corrupt, else false
 */
public boolean hasCorrupt() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
 * required bool corrupt = 4;
 *
 * true if all replicas of a block are corrupt, else false
 */
public boolean getCorrupt() {
return corrupt_;
}
// required .hadoop.common.TokenProto blockToken = 5;
public static final int BLOCKTOKEN_FIELD_NUMBER = 5;
private org.apache.hadoop.security.proto.SecurityProtos.TokenProto blockToken_;
/**
 * required .hadoop.common.TokenProto blockToken = 5;
 */
public boolean hasBlockToken() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
 * required .hadoop.common.TokenProto blockToken = 5;
 */
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockToken() {
return blockToken_;
}
/**
 * required .hadoop.common.TokenProto blockToken = 5;
 */
public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokenOrBuilder() {
return blockToken_;
}
// repeated bool isCached = 6 [packed = true];
// NOTE(review): List<java.lang.Boolean> type arguments restored (stripped to
// raw types by the HTML extraction of this generated file).
public static final int ISCACHED_FIELD_NUMBER = 6;
private java.util.List<java.lang.Boolean> isCached_;
/**
 * <code>repeated bool isCached = 6 [packed = true];</code>
 * Whether the corresponding location in locs is cached.
 */
public java.util.List<java.lang.Boolean>
    getIsCachedList() {
  return isCached_;
}
/**
 * <code>repeated bool isCached = 6 [packed = true];</code>
 */
public int getIsCachedCount() {
  return isCached_.size();
}
/**
 * <code>repeated bool isCached = 6 [packed = true];</code>
 */
public boolean getIsCached(int index) {
  return isCached_.get(index);
}
// Byte size of the packed payload, computed by getSerializedSize() and
// reused by writeTo() when emitting the packed field header.
private int isCachedMemoizedSerializedSize = -1;
// repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
// NOTE(review): List<StorageTypeProto> type arguments restored (stripped to
// raw types by the HTML extraction of this generated file).
public static final int STORAGETYPES_FIELD_NUMBER = 7;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> storageTypes_;
/**
 * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
 */
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getStorageTypesList() {
  return storageTypes_;
}
/**
 * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
 */
public int getStorageTypesCount() {
  return storageTypes_.size();
}
/**
 * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) {
  return storageTypes_.get(index);
}
// repeated string storageIDs = 8;
// NOTE(review): List<java.lang.String> return type restored (stripped to a
// raw type by the HTML extraction); the backing store stays a LazyStringList.
public static final int STORAGEIDS_FIELD_NUMBER = 8;
private com.google.protobuf.LazyStringList storageIDs_;
/**
 * <code>repeated string storageIDs = 8;</code>
 */
public java.util.List<java.lang.String>
    getStorageIDsList() {
  return storageIDs_;
}
/**
 * <code>repeated string storageIDs = 8;</code>
 */
public int getStorageIDsCount() {
  return storageIDs_.size();
}
/**
 * <code>repeated string storageIDs = 8;</code>
 */
public java.lang.String getStorageIDs(int index) {
  return storageIDs_.get(index);
}
/**
 * <code>repeated string storageIDs = 8;</code>
 * Raw bytes form, avoiding UTF-8 decoding when not needed.
 */
public com.google.protobuf.ByteString
    getStorageIDsBytes(int index) {
  return storageIDs_.getByteString(index);
}
// optional bytes blockIndices = 9;
public static final int BLOCKINDICES_FIELD_NUMBER = 9;
private com.google.protobuf.ByteString blockIndices_;
/**
 * optional bytes blockIndices = 9;
 *
 * striped block related fields
 */
public boolean hasBlockIndices() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
 * optional bytes blockIndices = 9;
 *
 * striped block related fields
 */
public com.google.protobuf.ByteString getBlockIndices() {
return blockIndices_;
}
// repeated .hadoop.common.TokenProto blockTokens = 10;
public static final int BLOCKTOKENS_FIELD_NUMBER = 10;
private java.util.List blockTokens_;
/**
* repeated .hadoop.common.TokenProto blockTokens = 10;
*
*
* each internal block has a block token
*
*/
public java.util.List getBlockTokensList() {
return blockTokens_;
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 10;
*
*
* each internal block has a block token
*
*/
public java.util.List extends org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>
getBlockTokensOrBuilderList() {
return blockTokens_;
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 10;
*
*
* each internal block has a block token
*
*/
public int getBlockTokensCount() {
return blockTokens_.size();
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 10;
*
*
* each internal block has a block token
*
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockTokens(int index) {
return blockTokens_.get(index);
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 10;
*
*
* each internal block has a block token
*
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokensOrBuilder(
int index) {
return blockTokens_.get(index);
}
// Resets every field to its proto2 default; called by the wire-format
// constructor before any field is read.
private void initFields() {
b_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
offset_ = 0L;
locs_ = java.util.Collections.emptyList();
corrupt_ = false;
blockToken_ = org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance();
isCached_ = java.util.Collections.emptyList();
storageTypes_ = java.util.Collections.emptyList();
storageIDs_ = com.google.protobuf.LazyStringArrayList.EMPTY;
blockIndices_ = com.google.protobuf.ByteString.EMPTY;
blockTokens_ = java.util.Collections.emptyList();
}
// Memoized initialization state: -1 = unknown, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
// A message is initialized when all required fields (b, offset, corrupt,
// blockToken) are set and every nested message is itself initialized.
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasB()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasOffset()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasCorrupt()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasBlockToken()) {
memoizedIsInitialized = 0;
return false;
}
if (!getB().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getLocsCount(); i++) {
if (!getLocs(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (!getBlockToken().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getBlockTokensCount(); i++) {
if (!getBlockTokens(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
// Serializes set fields in field-number order. getSerializedSize() is called
// first because it caches isCachedMemoizedSerializedSize, which is needed for
// the packed field 6 length prefix below.
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, b_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, offset_);
}
for (int i = 0; i < locs_.size(); i++) {
output.writeMessage(3, locs_.get(i));
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBool(4, corrupt_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeMessage(5, blockToken_);
}
// Packed repeated field: raw tag 50 (field 6, length-delimited), then the
// cached payload length, then the untagged bool elements.
if (getIsCachedList().size() > 0) {
output.writeRawVarint32(50);
output.writeRawVarint32(isCachedMemoizedSerializedSize);
}
for (int i = 0; i < isCached_.size(); i++) {
output.writeBoolNoTag(isCached_.get(i));
}
for (int i = 0; i < storageTypes_.size(); i++) {
output.writeEnum(7, storageTypes_.get(i).getNumber());
}
for (int i = 0; i < storageIDs_.size(); i++) {
output.writeBytes(8, storageIDs_.getByteString(i));
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeBytes(9, blockIndices_);
}
for (int i = 0; i < blockTokens_.size(); i++) {
output.writeMessage(10, blockTokens_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
// Computes (and memoizes) the serialized byte size. Also caches the packed
// isCached payload size for writeTo() as a side effect.
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, b_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, offset_);
}
for (int i = 0; i < locs_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, locs_.get(i));
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(4, corrupt_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(5, blockToken_);
}
{
// Packed field 6: each bool is 1 byte; when non-empty add 1 byte for the
// tag plus the varint length prefix.
int dataSize = 0;
dataSize = 1 * getIsCachedList().size();
size += dataSize;
if (!getIsCachedList().isEmpty()) {
size += 1;
size += com.google.protobuf.CodedOutputStream
.computeInt32SizeNoTag(dataSize);
}
isCachedMemoizedSerializedSize = dataSize;
}
{
// Unpacked enums: one tag byte per element plus each enum's varint size.
int dataSize = 0;
for (int i = 0; i < storageTypes_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeEnumSizeNoTag(storageTypes_.get(i).getNumber());
}
size += dataSize;
size += 1 * storageTypes_.size();
}
{
int dataSize = 0;
for (int i = 0; i < storageIDs_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeBytesSizeNoTag(storageIDs_.getByteString(i));
}
size += dataSize;
size += 1 * getStorageIDsList().size();
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(9, blockIndices_);
}
for (int i = 0; i < blockTokens_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(10, blockTokens_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
// Java serialization hook inherited from GeneratedMessage.
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
// Field-by-field structural equality: presence bits must match, and values
// are compared only for fields that are present. Unknown fields participate.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto) obj;
boolean result = true;
result = result && (hasB() == other.hasB());
if (hasB()) {
result = result && getB()
.equals(other.getB());
}
result = result && (hasOffset() == other.hasOffset());
if (hasOffset()) {
result = result && (getOffset()
== other.getOffset());
}
result = result && getLocsList()
.equals(other.getLocsList());
result = result && (hasCorrupt() == other.hasCorrupt());
if (hasCorrupt()) {
result = result && (getCorrupt()
== other.getCorrupt());
}
result = result && (hasBlockToken() == other.hasBlockToken());
if (hasBlockToken()) {
result = result && getBlockToken()
.equals(other.getBlockToken());
}
result = result && getIsCachedList()
.equals(other.getIsCachedList());
result = result && getStorageTypesList()
.equals(other.getStorageTypesList());
result = result && getStorageIDsList()
.equals(other.getStorageIDsList());
result = result && (hasBlockIndices() == other.hasBlockIndices());
if (hasBlockIndices()) {
result = result && getBlockIndices()
.equals(other.getBlockIndices());
}
result = result && getBlockTokensList()
.equals(other.getBlockTokensList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
// Memoized hash consistent with equals(): mixes each present field's number
// and value hash using the generated 19/37/53/29 multiplier scheme.
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasB()) {
hash = (37 * hash) + B_FIELD_NUMBER;
hash = (53 * hash) + getB().hashCode();
}
if (hasOffset()) {
hash = (37 * hash) + OFFSET_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getOffset());
}
if (getLocsCount() > 0) {
hash = (37 * hash) + LOCS_FIELD_NUMBER;
hash = (53 * hash) + getLocsList().hashCode();
}
if (hasCorrupt()) {
hash = (37 * hash) + CORRUPT_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getCorrupt());
}
if (hasBlockToken()) {
hash = (37 * hash) + BLOCKTOKEN_FIELD_NUMBER;
hash = (53 * hash) + getBlockToken().hashCode();
}
if (getIsCachedCount() > 0) {
hash = (37 * hash) + ISCACHED_FIELD_NUMBER;
hash = (53 * hash) + getIsCachedList().hashCode();
}
if (getStorageTypesCount() > 0) {
hash = (37 * hash) + STORAGETYPES_FIELD_NUMBER;
hash = (53 * hash) + hashEnumList(getStorageTypesList());
}
if (getStorageIDsCount() > 0) {
hash = (37 * hash) + STORAGEIDS_FIELD_NUMBER;
hash = (53 * hash) + getStorageIDsList().hashCode();
}
if (hasBlockIndices()) {
hash = (37 * hash) + BLOCKINDICES_FIELD_NUMBER;
hash = (53 * hash) + getBlockIndices().hashCode();
}
if (getBlockTokensCount() > 0) {
hash = (37 * hash) + BLOCKTOKENS_FIELD_NUMBER;
hash = (53 * hash) + getBlockTokensList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// Standard generated parse entry points; all delegate to PARSER. The
// parseDelimitedFrom variants expect a leading varint length prefix.
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Builder factory methods.  newBuilder(prototype) seeds a fresh builder
// with a copy of an existing message's fields; toBuilder() does the same
// for this instance.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.LocatedBlockProto}
*
*
**
* A LocatedBlock gives information about a block and its location.
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_descriptor;
}
// Binds the descriptor's fields to the generated accessors (reflection
// support for the protobuf runtime).
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
// Eagerly creates the nested message-field builders when the protobuf
// runtime's alwaysUseFieldBuilders flag is set.
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getBFieldBuilder();
getLocsFieldBuilder();
getBlockTokenFieldBuilder();
getBlockTokensFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
// Resets every field to its default value and clears all has-bits,
// leaving the builder as if freshly created.  Message-typed fields are
// reset either directly or through their nested builder, whichever is
// currently in use.
public Builder clear() {
super.clear();
if (bBuilder_ == null) {
b_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
} else {
bBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
offset_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
if (locsBuilder_ == null) {
locs_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
} else {
locsBuilder_.clear();
}
corrupt_ = false;
bitField0_ = (bitField0_ & ~0x00000008);
if (blockTokenBuilder_ == null) {
blockToken_ = org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance();
} else {
blockTokenBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000010);
isCached_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000020);
storageTypes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000040);
storageIDs_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000080);
blockIndices_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000100);
if (blockTokensBuilder_ == null) {
blockTokens_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000200);
} else {
blockTokensBuilder_.clear();
}
return this;
}
// Deep-copies the builder by round-tripping through a partial message.
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance();
}
// Builds the message, throwing UninitializedMessageException (unchecked)
// if any required field is unset -- see isInitialized().
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
// Builds the message WITHOUT enforcing required fields.  The builder's
// has-bit layout (which also reserves bits for repeated fields) is
// remapped onto the message's denser layout -- e.g. builder bit 0x08
// (corrupt) becomes message bit 0x04 because repeated fields carry no
// has-bit in the message.  Repeated fields are frozen into unmodifiable
// views so the backing lists can be shared with the built message.
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (bBuilder_ == null) {
result.b_ = b_;
} else {
result.b_ = bBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.offset_ = offset_;
if (locsBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004)) {
locs_ = java.util.Collections.unmodifiableList(locs_);
bitField0_ = (bitField0_ & ~0x00000004);
}
result.locs_ = locs_;
} else {
result.locs_ = locsBuilder_.build();
}
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000004;
}
result.corrupt_ = corrupt_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000008;
}
if (blockTokenBuilder_ == null) {
result.blockToken_ = blockToken_;
} else {
result.blockToken_ = blockTokenBuilder_.build();
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
isCached_ = java.util.Collections.unmodifiableList(isCached_);
bitField0_ = (bitField0_ & ~0x00000020);
}
result.isCached_ = isCached_;
if (((bitField0_ & 0x00000040) == 0x00000040)) {
storageTypes_ = java.util.Collections.unmodifiableList(storageTypes_);
bitField0_ = (bitField0_ & ~0x00000040);
}
result.storageTypes_ = storageTypes_;
if (((bitField0_ & 0x00000080) == 0x00000080)) {
storageIDs_ = new com.google.protobuf.UnmodifiableLazyStringList(
storageIDs_);
bitField0_ = (bitField0_ & ~0x00000080);
}
result.storageIDs_ = storageIDs_;
if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
to_bitField0_ |= 0x00000010;
}
result.blockIndices_ = blockIndices_;
if (blockTokensBuilder_ == null) {
if (((bitField0_ & 0x00000200) == 0x00000200)) {
blockTokens_ = java.util.Collections.unmodifiableList(blockTokens_);
bitField0_ = (bitField0_ & ~0x00000200);
}
result.blockTokens_ = blockTokens_;
} else {
result.blockTokens_ = blockTokensBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
// Generic merge entry point: dispatches to the typed overload below when
// possible, otherwise falls back to reflective field-by-field merging.
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
// Typed merge: singular fields present in `other` overwrite/merge ours;
// repeated fields are appended.  When our repeated field is still empty
// we adopt `other`'s (immutable) list by reference and only copy on a
// later mutation.
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()) return this;
if (other.hasB()) {
mergeB(other.getB());
}
if (other.hasOffset()) {
setOffset(other.getOffset());
}
if (locsBuilder_ == null) {
if (!other.locs_.isEmpty()) {
if (locs_.isEmpty()) {
locs_ = other.locs_;
bitField0_ = (bitField0_ & ~0x00000004);
} else {
ensureLocsIsMutable();
locs_.addAll(other.locs_);
}
onChanged();
}
} else {
if (!other.locs_.isEmpty()) {
if (locsBuilder_.isEmpty()) {
// Discard the empty nested builder and adopt the list directly;
// recreate the builder only if the runtime forces builder use.
locsBuilder_.dispose();
locsBuilder_ = null;
locs_ = other.locs_;
bitField0_ = (bitField0_ & ~0x00000004);
locsBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getLocsFieldBuilder() : null;
} else {
locsBuilder_.addAllMessages(other.locs_);
}
}
}
if (other.hasCorrupt()) {
setCorrupt(other.getCorrupt());
}
if (other.hasBlockToken()) {
mergeBlockToken(other.getBlockToken());
}
if (!other.isCached_.isEmpty()) {
if (isCached_.isEmpty()) {
isCached_ = other.isCached_;
bitField0_ = (bitField0_ & ~0x00000020);
} else {
ensureIsCachedIsMutable();
isCached_.addAll(other.isCached_);
}
onChanged();
}
if (!other.storageTypes_.isEmpty()) {
if (storageTypes_.isEmpty()) {
storageTypes_ = other.storageTypes_;
bitField0_ = (bitField0_ & ~0x00000040);
} else {
ensureStorageTypesIsMutable();
storageTypes_.addAll(other.storageTypes_);
}
onChanged();
}
if (!other.storageIDs_.isEmpty()) {
if (storageIDs_.isEmpty()) {
storageIDs_ = other.storageIDs_;
bitField0_ = (bitField0_ & ~0x00000080);
} else {
ensureStorageIDsIsMutable();
storageIDs_.addAll(other.storageIDs_);
}
onChanged();
}
if (other.hasBlockIndices()) {
setBlockIndices(other.getBlockIndices());
}
if (blockTokensBuilder_ == null) {
if (!other.blockTokens_.isEmpty()) {
if (blockTokens_.isEmpty()) {
blockTokens_ = other.blockTokens_;
bitField0_ = (bitField0_ & ~0x00000200);
} else {
ensureBlockTokensIsMutable();
blockTokens_.addAll(other.blockTokens_);
}
onChanged();
}
} else {
if (!other.blockTokens_.isEmpty()) {
if (blockTokensBuilder_.isEmpty()) {
blockTokensBuilder_.dispose();
blockTokensBuilder_ = null;
blockTokens_ = other.blockTokens_;
bitField0_ = (bitField0_ & ~0x00000200);
blockTokensBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getBlockTokensFieldBuilder() : null;
} else {
blockTokensBuilder_.addAllMessages(other.blockTokens_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
// True when all required fields (b, offset, corrupt, blockToken) are set
// and every nested message -- including each element of the repeated
// locs/blockTokens fields -- is itself initialized.
public final boolean isInitialized() {
if (!hasB()) {
return false;
}
if (!hasOffset()) {
return false;
}
if (!hasCorrupt()) {
return false;
}
if (!hasBlockToken()) {
return false;
}
if (!getB().isInitialized()) {
return false;
}
for (int i = 0; i < getLocsCount(); i++) {
if (!getLocs(i).isInitialized()) {
return false;
}
}
if (!getBlockToken().isInitialized()) {
return false;
}
for (int i = 0; i < getBlockTokensCount(); i++) {
if (!getBlockTokens(i).isInitialized()) {
return false;
}
}
return true;
}
// Parses a wire-format message from the stream and merges it in.  On a
// parse failure, any fields decoded before the error are still merged
// (via the finally block) before the exception is rethrown.
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// Tracks which fields have been explicitly set (one bit per field, in
// declaration order); repeated fields use their bit to mean "list is
// privately mutable" instead.
private int bitField0_;
// required .hadoop.hdfs.ExtendedBlockProto b = 1;
// The field value lives EITHER in b_ (plain message) or in bBuilder_
// (nested builder); whichever is non-null/active is authoritative.
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto b_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> bBuilder_;
/**
* required .hadoop.hdfs.ExtendedBlockProto b = 1;
*/
public boolean hasB() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ExtendedBlockProto b = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB() {
if (bBuilder_ == null) {
return b_;
} else {
return bBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ExtendedBlockProto b = 1;
*/
public Builder setB(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
if (bBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
b_ = value;
onChanged();
} else {
bBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto b = 1;
*/
public Builder setB(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) {
if (bBuilder_ == null) {
b_ = builderForValue.build();
onChanged();
} else {
bBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto b = 1;
*/
public Builder mergeB(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
if (bBuilder_ == null) {
// Merge field-by-field only if we already hold a non-default value;
// otherwise adopt the incoming message wholesale.
if (((bitField0_ & 0x00000001) == 0x00000001) &&
b_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) {
b_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(b_).mergeFrom(value).buildPartial();
} else {
b_ = value;
}
onChanged();
} else {
bBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto b = 1;
*/
public Builder clearB() {
if (bBuilder_ == null) {
b_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
onChanged();
} else {
bBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto b = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getBFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ExtendedBlockProto b = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder() {
if (bBuilder_ != null) {
return bBuilder_.getMessageOrBuilder();
} else {
return b_;
}
}
/**
* required .hadoop.hdfs.ExtendedBlockProto b = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>
getBFieldBuilder() {
if (bBuilder_ == null) {
// Lazily switch from plain-message storage to builder storage; b_ is
// nulled because the builder now owns the value.
bBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>(
b_,
getParentForChildren(),
isClean());
b_ = null;
}
return bBuilder_;
}
// required uint64 offset = 2; (has-bit 0x00000002)
private long offset_ ;
/**
* required uint64 offset = 2;
*
*
* offset of first byte of block in the file
*
*/
public boolean hasOffset() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 offset = 2;
*
*
* offset of first byte of block in the file
*
*/
public long getOffset() {
return offset_;
}
/**
* required uint64 offset = 2;
*
*
* offset of first byte of block in the file
*
*/
public Builder setOffset(long value) {
bitField0_ |= 0x00000002;
offset_ = value;
onChanged();
return this;
}
/**
* required uint64 offset = 2;
*
*
* offset of first byte of block in the file
*
*/
public Builder clearOffset() {
bitField0_ = (bitField0_ & ~0x00000002);
offset_ = 0L;
onChanged();
return this;
}
// repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
private java.util.List locs_ =
java.util.Collections.emptyList();
private void ensureLocsIsMutable() {
if (!((bitField0_ & 0x00000004) == 0x00000004)) {
locs_ = new java.util.ArrayList(locs_);
bitField0_ |= 0x00000004;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> locsBuilder_;
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public java.util.List getLocsList() {
if (locsBuilder_ == null) {
return java.util.Collections.unmodifiableList(locs_);
} else {
return locsBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public int getLocsCount() {
if (locsBuilder_ == null) {
return locs_.size();
} else {
return locsBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getLocs(int index) {
if (locsBuilder_ == null) {
return locs_.get(index);
} else {
return locsBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public Builder setLocs(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (locsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureLocsIsMutable();
locs_.set(index, value);
onChanged();
} else {
locsBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public Builder setLocs(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (locsBuilder_ == null) {
ensureLocsIsMutable();
locs_.set(index, builderForValue.build());
onChanged();
} else {
locsBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public Builder addLocs(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (locsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureLocsIsMutable();
locs_.add(value);
onChanged();
} else {
locsBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public Builder addLocs(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (locsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureLocsIsMutable();
locs_.add(index, value);
onChanged();
} else {
locsBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public Builder addLocs(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (locsBuilder_ == null) {
ensureLocsIsMutable();
locs_.add(builderForValue.build());
onChanged();
} else {
locsBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public Builder addLocs(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (locsBuilder_ == null) {
ensureLocsIsMutable();
locs_.add(index, builderForValue.build());
onChanged();
} else {
locsBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public Builder addAllLocs(
java.lang.Iterable extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> values) {
if (locsBuilder_ == null) {
ensureLocsIsMutable();
super.addAll(values, locs_);
onChanged();
} else {
locsBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public Builder clearLocs() {
if (locsBuilder_ == null) {
locs_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
} else {
locsBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public Builder removeLocs(int index) {
if (locsBuilder_ == null) {
ensureLocsIsMutable();
locs_.remove(index);
onChanged();
} else {
locsBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getLocsBuilder(
int index) {
return getLocsFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getLocsOrBuilder(
int index) {
if (locsBuilder_ == null) {
return locs_.get(index); } else {
return locsBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public java.util.List extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getLocsOrBuilderList() {
if (locsBuilder_ != null) {
return locsBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(locs_);
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addLocsBuilder() {
return getLocsFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addLocsBuilder(
int index) {
return getLocsFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;
*
*
* Locations ordered by proximity to client ip
*
*/
public java.util.List
getLocsBuilderList() {
return getLocsFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getLocsFieldBuilder() {
if (locsBuilder_ == null) {
locsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>(
locs_,
((bitField0_ & 0x00000004) == 0x00000004),
getParentForChildren(),
isClean());
locs_ = null;
}
return locsBuilder_;
}
// required bool corrupt = 4; (has-bit 0x00000008)
private boolean corrupt_ ;
/**
* required bool corrupt = 4;
*
*
* true if all replicas of a block are corrupt, else false
*
*/
public boolean hasCorrupt() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required bool corrupt = 4;
*
*
* true if all replicas of a block are corrupt, else false
*
*/
public boolean getCorrupt() {
return corrupt_;
}
/**
* required bool corrupt = 4;
*
*
* true if all replicas of a block are corrupt, else false
*
*/
public Builder setCorrupt(boolean value) {
bitField0_ |= 0x00000008;
corrupt_ = value;
onChanged();
return this;
}
/**
* required bool corrupt = 4;
*
*
* true if all replicas of a block are corrupt, else false
*
*/
public Builder clearCorrupt() {
bitField0_ = (bitField0_ & ~0x00000008);
corrupt_ = false;
onChanged();
return this;
}
// required .hadoop.common.TokenProto blockToken = 5; (has-bit 0x00000010)
// Same dual-storage pattern as field `b`: value lives in blockToken_ until
// a nested builder is requested, then in blockTokenBuilder_.
private org.apache.hadoop.security.proto.SecurityProtos.TokenProto blockToken_ = org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> blockTokenBuilder_;
/**
* required .hadoop.common.TokenProto blockToken = 5;
*/
public boolean hasBlockToken() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required .hadoop.common.TokenProto blockToken = 5;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockToken() {
if (blockTokenBuilder_ == null) {
return blockToken_;
} else {
return blockTokenBuilder_.getMessage();
}
}
/**
* required .hadoop.common.TokenProto blockToken = 5;
*/
public Builder setBlockToken(org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
if (blockTokenBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
blockToken_ = value;
onChanged();
} else {
blockTokenBuilder_.setMessage(value);
}
bitField0_ |= 0x00000010;
return this;
}
/**
* required .hadoop.common.TokenProto blockToken = 5;
*/
public Builder setBlockToken(
org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) {
if (blockTokenBuilder_ == null) {
blockToken_ = builderForValue.build();
onChanged();
} else {
blockTokenBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000010;
return this;
}
/**
* required .hadoop.common.TokenProto blockToken = 5;
*/
public Builder mergeBlockToken(org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
if (blockTokenBuilder_ == null) {
// Merge into an existing non-default value; otherwise adopt `value`.
if (((bitField0_ & 0x00000010) == 0x00000010) &&
blockToken_ != org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance()) {
blockToken_ =
org.apache.hadoop.security.proto.SecurityProtos.TokenProto.newBuilder(blockToken_).mergeFrom(value).buildPartial();
} else {
blockToken_ = value;
}
onChanged();
} else {
blockTokenBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000010;
return this;
}
/**
* required .hadoop.common.TokenProto blockToken = 5;
*/
public Builder clearBlockToken() {
if (blockTokenBuilder_ == null) {
blockToken_ = org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance();
onChanged();
} else {
blockTokenBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
/**
* required .hadoop.common.TokenProto blockToken = 5;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder getBlockTokenBuilder() {
bitField0_ |= 0x00000010;
onChanged();
return getBlockTokenFieldBuilder().getBuilder();
}
/**
* required .hadoop.common.TokenProto blockToken = 5;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokenOrBuilder() {
if (blockTokenBuilder_ != null) {
return blockTokenBuilder_.getMessageOrBuilder();
} else {
return blockToken_;
}
}
/**
* required .hadoop.common.TokenProto blockToken = 5;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>
getBlockTokenFieldBuilder() {
if (blockTokenBuilder_ == null) {
blockTokenBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>(
blockToken_,
getParentForChildren(),
isClean());
blockToken_ = null;
}
return blockTokenBuilder_;
}
// repeated bool isCached = 6 [packed = true];
private java.util.List isCached_ = java.util.Collections.emptyList();
private void ensureIsCachedIsMutable() {
if (!((bitField0_ & 0x00000020) == 0x00000020)) {
isCached_ = new java.util.ArrayList(isCached_);
bitField0_ |= 0x00000020;
}
}
/**
* repeated bool isCached = 6 [packed = true];
*
*
* if a location in locs is cached
*
*/
public java.util.List
getIsCachedList() {
return java.util.Collections.unmodifiableList(isCached_);
}
/**
* repeated bool isCached = 6 [packed = true];
*
*
* if a location in locs is cached
*
*/
public int getIsCachedCount() {
return isCached_.size();
}
/**
* repeated bool isCached = 6 [packed = true];
*
*
* if a location in locs is cached
*
*/
public boolean getIsCached(int index) {
return isCached_.get(index);
}
/**
* repeated bool isCached = 6 [packed = true];
*
*
* if a location in locs is cached
*
*/
public Builder setIsCached(
int index, boolean value) {
ensureIsCachedIsMutable();
isCached_.set(index, value);
onChanged();
return this;
}
/**
* repeated bool isCached = 6 [packed = true];
*
*
* if a location in locs is cached
*
*/
public Builder addIsCached(boolean value) {
ensureIsCachedIsMutable();
isCached_.add(value);
onChanged();
return this;
}
/**
* repeated bool isCached = 6 [packed = true];
*
*
* if a location in locs is cached
*
*/
public Builder addAllIsCached(
java.lang.Iterable extends java.lang.Boolean> values) {
ensureIsCachedIsMutable();
super.addAll(values, isCached_);
onChanged();
return this;
}
/**
* repeated bool isCached = 6 [packed = true];
*
*
* if a location in locs is cached
*
*/
public Builder clearIsCached() {
isCached_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000020);
onChanged();
return this;
}
// repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
private java.util.List storageTypes_ =
java.util.Collections.emptyList();
private void ensureStorageTypesIsMutable() {
if (!((bitField0_ & 0x00000040) == 0x00000040)) {
storageTypes_ = new java.util.ArrayList(storageTypes_);
bitField0_ |= 0x00000040;
}
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
*/
public java.util.List getStorageTypesList() {
return java.util.Collections.unmodifiableList(storageTypes_);
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
*/
public int getStorageTypesCount() {
return storageTypes_.size();
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) {
return storageTypes_.get(index);
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
*/
public Builder setStorageTypes(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
ensureStorageTypesIsMutable();
storageTypes_.set(index, value);
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
*/
public Builder addStorageTypes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
ensureStorageTypesIsMutable();
storageTypes_.add(value);
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
*/
public Builder addAllStorageTypes(
java.lang.Iterable extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> values) {
ensureStorageTypesIsMutable();
super.addAll(values, storageTypes_);
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;
*/
public Builder clearStorageTypes() {
storageTypes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000040);
onChanged();
return this;
}
// repeated string storageIDs = 8;
// Restored generic type arguments (List<String>, Iterable<String>) that
// were stripped by HTML extraction.
private com.google.protobuf.LazyStringList storageIDs_ = com.google.protobuf.LazyStringArrayList.EMPTY;
// Copy-on-write guard: bit 0x80 records that storageIDs_ is a private
// mutable LazyStringArrayList rather than the shared EMPTY singleton.
private void ensureStorageIDsIsMutable() {
  if (!((bitField0_ & 0x00000080) == 0x00000080)) {
    storageIDs_ = new com.google.protobuf.LazyStringArrayList(storageIDs_);
    bitField0_ |= 0x00000080;
  }
}
/**
 * <code>repeated string storageIDs = 8;</code>
 */
public java.util.List<java.lang.String>
    getStorageIDsList() {
  return java.util.Collections.unmodifiableList(storageIDs_);
}
/**
 * <code>repeated string storageIDs = 8;</code>
 */
public int getStorageIDsCount() {
  return storageIDs_.size();
}
/**
 * <code>repeated string storageIDs = 8;</code>
 */
public java.lang.String getStorageIDs(int index) {
  return storageIDs_.get(index);
}
/**
 * <code>repeated string storageIDs = 8;</code>
 */
public com.google.protobuf.ByteString
    getStorageIDsBytes(int index) {
  // LazyStringList caches the UTF-8 ByteString form alongside the String.
  return storageIDs_.getByteString(index);
}
/**
 * <code>repeated string storageIDs = 8;</code>
 */
public Builder setStorageIDs(
    int index, java.lang.String value) {
  if (value == null) {
    throw new NullPointerException();
  }
  ensureStorageIDsIsMutable();
  storageIDs_.set(index, value);
  onChanged();
  return this;
}
/**
 * <code>repeated string storageIDs = 8;</code>
 */
public Builder addStorageIDs(
    java.lang.String value) {
  if (value == null) {
    throw new NullPointerException();
  }
  ensureStorageIDsIsMutable();
  storageIDs_.add(value);
  onChanged();
  return this;
}
/**
 * <code>repeated string storageIDs = 8;</code>
 */
public Builder addAllStorageIDs(
    java.lang.Iterable<java.lang.String> values) {
  ensureStorageIDsIsMutable();
  super.addAll(values, storageIDs_);
  onChanged();
  return this;
}
/**
 * <code>repeated string storageIDs = 8;</code>
 */
public Builder clearStorageIDs() {
  storageIDs_ = com.google.protobuf.LazyStringArrayList.EMPTY;
  bitField0_ = (bitField0_ & ~0x00000080);
  onChanged();
  return this;
}
/**
 * <code>repeated string storageIDs = 8;</code>
 */
public Builder addStorageIDsBytes(
    com.google.protobuf.ByteString value) {
  if (value == null) {
    throw new NullPointerException();
  }
  ensureStorageIDsIsMutable();
  storageIDs_.add(value);
  onChanged();
  return this;
}
// optional bytes blockIndices = 9;
private com.google.protobuf.ByteString blockIndices_ = com.google.protobuf.ByteString.EMPTY;
/**
 * optional bytes blockIndices = 9;
 *
 *
 * striped block related fields
 *
 */
// Presence is tracked by bit 0x100 of bitField0_.
public boolean hasBlockIndices() {
return ((bitField0_ & 0x00000100) == 0x00000100);
}
/**
 * optional bytes blockIndices = 9;
 *
 *
 * striped block related fields
 *
 */
public com.google.protobuf.ByteString getBlockIndices() {
return blockIndices_;
}
/**
 * optional bytes blockIndices = 9;
 *
 *
 * striped block related fields
 *
 */
public Builder setBlockIndices(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000100;
blockIndices_ = value;
onChanged();
return this;
}
/**
 * optional bytes blockIndices = 9;
 *
 *
 * striped block related fields
 *
 */
// Clearing restores the message default (empty ByteString) and drops the
// presence bit.
public Builder clearBlockIndices() {
bitField0_ = (bitField0_ & ~0x00000100);
blockIndices_ = getDefaultInstance().getBlockIndices();
onChanged();
return this;
}
// repeated .hadoop.common.TokenProto blockTokens = 10;
private java.util.List blockTokens_ =
java.util.Collections.emptyList();
private void ensureBlockTokensIsMutable() {
if (!((bitField0_ & 0x00000200) == 0x00000200)) {
blockTokens_ = new java.util.ArrayList(blockTokens_);
bitField0_ |= 0x00000200;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> blockTokensBuilder_;
/**
* repeated .hadoop.common.TokenProto blockTokens = 10;
*
*
* each internal block has a block token
*
*/
public java.util.List getBlockTokensList() {
if (blockTokensBuilder_ == null) {
return java.util.Collections.unmodifiableList(blockTokens_);
} else {
return blockTokensBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 10;
*
*
* each internal block has a block token
*
*/
public int getBlockTokensCount() {
if (blockTokensBuilder_ == null) {
return blockTokens_.size();
} else {
return blockTokensBuilder_.getCount();
}
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 10;
*
*
* each internal block has a block token
*
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockTokens(int index) {
if (blockTokensBuilder_ == null) {
return blockTokens_.get(index);
} else {
return blockTokensBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 10;
*
*
* each internal block has a block token
*
*/
public Builder setBlockTokens(
int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
if (blockTokensBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlockTokensIsMutable();
blockTokens_.set(index, value);
onChanged();
} else {
blockTokensBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 10;
*
*
* each internal block has a block token
*
*/
public Builder setBlockTokens(
int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) {
if (blockTokensBuilder_ == null) {
ensureBlockTokensIsMutable();
blockTokens_.set(index, builderForValue.build());
onChanged();
} else {
blockTokensBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 10;
*
*
* each internal block has a block token
*
*/
public Builder addBlockTokens(org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
if (blockTokensBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlockTokensIsMutable();
blockTokens_.add(value);
onChanged();
} else {
blockTokensBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 10;
*
*
* each internal block has a block token
*
*/
public Builder addBlockTokens(
int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
if (blockTokensBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlockTokensIsMutable();
blockTokens_.add(index, value);
onChanged();
} else {
blockTokensBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 10;
*
*
* each internal block has a block token
*
*/
public Builder addBlockTokens(
org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) {
if (blockTokensBuilder_ == null) {
ensureBlockTokensIsMutable();
blockTokens_.add(builderForValue.build());
onChanged();
} else {
blockTokensBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 10;
*
*
* each internal block has a block token
*
*/
public Builder addBlockTokens(
int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) {
if (blockTokensBuilder_ == null) {
ensureBlockTokensIsMutable();
blockTokens_.add(index, builderForValue.build());
onChanged();
} else {
blockTokensBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 10;
*
*
* each internal block has a block token
*
*/
public Builder addAllBlockTokens(
java.lang.Iterable extends org.apache.hadoop.security.proto.SecurityProtos.TokenProto> values) {
if (blockTokensBuilder_ == null) {
ensureBlockTokensIsMutable();
super.addAll(values, blockTokens_);
onChanged();
} else {
blockTokensBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 10;
*
*
* each internal block has a block token
*
*/
public Builder clearBlockTokens() {
if (blockTokensBuilder_ == null) {
blockTokens_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000200);
onChanged();
} else {
blockTokensBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 10;
*
*
* each internal block has a block token
*
*/
public Builder removeBlockTokens(int index) {
if (blockTokensBuilder_ == null) {
ensureBlockTokensIsMutable();
blockTokens_.remove(index);
onChanged();
} else {
blockTokensBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 10;
*
*
* each internal block has a block token
*
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder getBlockTokensBuilder(
int index) {
return getBlockTokensFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 10;
*
*
* each internal block has a block token
*
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokensOrBuilder(
int index) {
if (blockTokensBuilder_ == null) {
return blockTokens_.get(index); } else {
return blockTokensBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 10;
*
*
* each internal block has a block token
*
*/
public java.util.List extends org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>
getBlockTokensOrBuilderList() {
if (blockTokensBuilder_ != null) {
return blockTokensBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(blockTokens_);
}
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 10;
*
*
* each internal block has a block token
*
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder addBlockTokensBuilder() {
return getBlockTokensFieldBuilder().addBuilder(
org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance());
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 10;
*
*
* each internal block has a block token
*
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder addBlockTokensBuilder(
int index) {
return getBlockTokensFieldBuilder().addBuilder(
index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance());
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 10;
*
*
* each internal block has a block token
*
*/
public java.util.List
getBlockTokensBuilderList() {
return getBlockTokensFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>
getBlockTokensFieldBuilder() {
if (blockTokensBuilder_ == null) {
blockTokensBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>(
blockTokens_,
((bitField0_ & 0x00000200) == 0x00000200),
getParentForChildren(),
isClean());
blockTokens_ = null;
}
return blockTokensBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.LocatedBlockProto)
}
static {
defaultInstance = new LocatedBlockProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.LocatedBlockProto)
}
/**
 * Read-only accessor view shared by {@code DataEncryptionKeyProto} and its
 * Builder: has/get pairs for every field of the message, plus raw
 * ByteString accessors for string fields.
 */
public interface DataEncryptionKeyProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required uint32 keyId = 1;
/**
 * required uint32 keyId = 1;
 */
boolean hasKeyId();
/**
 * required uint32 keyId = 1;
 */
int getKeyId();
// required string blockPoolId = 2;
/**
 * required string blockPoolId = 2;
 */
boolean hasBlockPoolId();
/**
 * required string blockPoolId = 2;
 */
java.lang.String getBlockPoolId();
/**
 * required string blockPoolId = 2;
 */
com.google.protobuf.ByteString
getBlockPoolIdBytes();
// required bytes nonce = 3;
/**
 * required bytes nonce = 3;
 */
boolean hasNonce();
/**
 * required bytes nonce = 3;
 */
com.google.protobuf.ByteString getNonce();
// required bytes encryptionKey = 4;
/**
 * required bytes encryptionKey = 4;
 */
boolean hasEncryptionKey();
/**
 * required bytes encryptionKey = 4;
 */
com.google.protobuf.ByteString getEncryptionKey();
// required uint64 expiryDate = 5;
/**
 * required uint64 expiryDate = 5;
 */
boolean hasExpiryDate();
/**
 * required uint64 expiryDate = 5;
 */
long getExpiryDate();
// optional string encryptionAlgorithm = 6;
/**
 * optional string encryptionAlgorithm = 6;
 */
boolean hasEncryptionAlgorithm();
/**
 * optional string encryptionAlgorithm = 6;
 */
java.lang.String getEncryptionAlgorithm();
/**
 * optional string encryptionAlgorithm = 6;
 */
com.google.protobuf.ByteString
getEncryptionAlgorithmBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.DataEncryptionKeyProto}
*/
public static final class DataEncryptionKeyProto extends
com.google.protobuf.GeneratedMessage
implements DataEncryptionKeyProtoOrBuilder {
// Use DataEncryptionKeyProto.newBuilder() to construct.
// Restored the wildcard type argument on GeneratedMessage.Builder<?>:
// HTML extraction dropped "<?", leaving a syntax error.
private DataEncryptionKeyProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
  super(builder);
  this.unknownFields = builder.getUnknownFields();
}
// no-init constructor used only for the shared default instance.
private DataEncryptionKeyProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final DataEncryptionKeyProto defaultInstance;
public static DataEncryptionKeyProto getDefaultInstance() {
  return defaultInstance;
}
public DataEncryptionKeyProto getDefaultInstanceForType() {
  return defaultInstance;
}
// Fields not defined in the schema are preserved here for reserialization.
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
    getUnknownFields() {
  return this.unknownFields;
}
// Wire-format parsing constructor: reads tag/value pairs until EOF (tag 0)
// or an unparseable unknown field, setting the matching presence bit for
// each recognized field. Unknown fields are accumulated, not dropped.
private DataEncryptionKeyProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
// NOTE: default appearing before the field cases is legal Java and is
// the standard protoc output shape; dispatch is unaffected by order.
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
// tag = (field_number << 3) | wire_type; 8 = keyId varint, 18/26/34 =
// length-delimited fields 2-4, 40 = expiryDate varint, 50 = field 6.
case 8: {
bitField0_ |= 0x00000001;
keyId_ = input.readUInt32();
break;
}
case 18: {
bitField0_ |= 0x00000002;
blockPoolId_ = input.readBytes();
break;
}
case 26: {
bitField0_ |= 0x00000004;
nonce_ = input.readBytes();
break;
}
case 34: {
bitField0_ |= 0x00000008;
encryptionKey_ = input.readBytes();
break;
}
case 40: {
bitField0_ |= 0x00000010;
expiryDate_ = input.readUInt64();
break;
}
case 50: {
bitField0_ |= 0x00000020;
encryptionAlgorithm_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
// Always freeze what was parsed, even on error, so the partial message
// attached to the exception is usable.
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
    getDescriptor() {
  return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
    internalGetFieldAccessorTable() {
  return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_fieldAccessorTable
      .ensureFieldAccessorsInitialized(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.Builder.class);
}
// Restored the <DataEncryptionKeyProto> type arguments on Parser /
// AbstractParser that HTML extraction stripped (they were raw types).
public static com.google.protobuf.Parser<DataEncryptionKeyProto> PARSER =
    new com.google.protobuf.AbstractParser<DataEncryptionKeyProto>() {
  public DataEncryptionKeyProto parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return new DataEncryptionKeyProto(input, extensionRegistry);
  }
};
@java.lang.Override
public com.google.protobuf.Parser<DataEncryptionKeyProto> getParserForType() {
  return PARSER;
}
// Presence bits for the six fields, in declaration order (bit 0 = keyId).
private int bitField0_;
// required uint32 keyId = 1;
public static final int KEYID_FIELD_NUMBER = 1;
private int keyId_;
/**
 * required uint32 keyId = 1;
 */
public boolean hasKeyId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
 * required uint32 keyId = 1;
 */
public int getKeyId() {
return keyId_;
}
// required string blockPoolId = 2;
public static final int BLOCKPOOLID_FIELD_NUMBER = 2;
// Holds either a String or a ByteString; parsed bytes are converted to a
// String lazily on first access and cached (see getBlockPoolId()).
private java.lang.Object blockPoolId_;
/**
 * required string blockPoolId = 2;
 */
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
 * required string blockPoolId = 2;
 */
public java.lang.String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
// Only cache the decoded form when it round-trips as valid UTF-8.
if (bs.isValidUtf8()) {
blockPoolId_ = s;
}
return s;
}
}
/**
 * required string blockPoolId = 2;
 */
public com.google.protobuf.ByteString
getBlockPoolIdBytes() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPoolId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required bytes nonce = 3;
public static final int NONCE_FIELD_NUMBER = 3;
private com.google.protobuf.ByteString nonce_;
/**
 * required bytes nonce = 3;
 */
public boolean hasNonce() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
 * required bytes nonce = 3;
 */
public com.google.protobuf.ByteString getNonce() {
return nonce_;
}
// required bytes encryptionKey = 4;
public static final int ENCRYPTIONKEY_FIELD_NUMBER = 4;
private com.google.protobuf.ByteString encryptionKey_;
/**
 * required bytes encryptionKey = 4;
 */
public boolean hasEncryptionKey() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
 * required bytes encryptionKey = 4;
 */
public com.google.protobuf.ByteString getEncryptionKey() {
return encryptionKey_;
}
// required uint64 expiryDate = 5;
public static final int EXPIRYDATE_FIELD_NUMBER = 5;
private long expiryDate_;
/**
 * required uint64 expiryDate = 5;
 */
public boolean hasExpiryDate() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
 * required uint64 expiryDate = 5;
 */
public long getExpiryDate() {
return expiryDate_;
}
// optional string encryptionAlgorithm = 6;
public static final int ENCRYPTIONALGORITHM_FIELD_NUMBER = 6;
// Same lazy String/ByteString caching scheme as blockPoolId_ above.
private java.lang.Object encryptionAlgorithm_;
/**
 * optional string encryptionAlgorithm = 6;
 */
public boolean hasEncryptionAlgorithm() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
 * optional string encryptionAlgorithm = 6;
 */
public java.lang.String getEncryptionAlgorithm() {
java.lang.Object ref = encryptionAlgorithm_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
encryptionAlgorithm_ = s;
}
return s;
}
}
/**
 * optional string encryptionAlgorithm = 6;
 */
public com.google.protobuf.ByteString
getEncryptionAlgorithmBytes() {
java.lang.Object ref = encryptionAlgorithm_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
encryptionAlgorithm_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// Sets every field to its proto default; called before parsing.
private void initFields() {
keyId_ = 0;
blockPoolId_ = "";
nonce_ = com.google.protobuf.ByteString.EMPTY;
encryptionKey_ = com.google.protobuf.ByteString.EMPTY;
expiryDate_ = 0L;
encryptionAlgorithm_ = "";
}
// Cached initialization state: -1 = not yet computed, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
// A message is initialized when all five required fields are present;
// encryptionAlgorithm (optional) is intentionally not checked.
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasKeyId()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasBlockPoolId()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasNonce()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasEncryptionKey()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasExpiryDate()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
// Serializes only fields whose presence bit is set, in field-number order,
// then any unknown fields captured at parse time.
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
// Forces the size to be memoized before writing (protoc convention).
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt32(1, keyId_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getBlockPoolIdBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, nonce_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeBytes(4, encryptionKey_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeUInt64(5, expiryDate_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeBytes(6, getEncryptionAlgorithmBytes());
}
getUnknownFields().writeTo(output);
}
// Cached wire size; -1 means not yet computed. Computation mirrors
// writeTo() field-for-field so the two always agree.
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(1, keyId_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getBlockPoolIdBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, nonce_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(4, encryptionKey_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(5, expiryDate_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(6, getEncryptionAlgorithmBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
// Field-by-field equality: presence must match, and values are compared
// only when present; unknown fields also participate.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto) obj;
boolean result = true;
result = result && (hasKeyId() == other.hasKeyId());
if (hasKeyId()) {
result = result && (getKeyId()
== other.getKeyId());
}
result = result && (hasBlockPoolId() == other.hasBlockPoolId());
if (hasBlockPoolId()) {
result = result && getBlockPoolId()
.equals(other.getBlockPoolId());
}
result = result && (hasNonce() == other.hasNonce());
if (hasNonce()) {
result = result && getNonce()
.equals(other.getNonce());
}
result = result && (hasEncryptionKey() == other.hasEncryptionKey());
if (hasEncryptionKey()) {
result = result && getEncryptionKey()
.equals(other.getEncryptionKey());
}
result = result && (hasExpiryDate() == other.hasExpiryDate());
if (hasExpiryDate()) {
result = result && (getExpiryDate()
== other.getExpiryDate());
}
result = result && (hasEncryptionAlgorithm() == other.hasEncryptionAlgorithm());
if (hasEncryptionAlgorithm()) {
result = result && getEncryptionAlgorithm()
.equals(other.getEncryptionAlgorithm());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
// Cached hash; 0 means not yet computed. Mixes each present field's number
// and value with the standard protoc 37/53 multipliers, keeping hashCode
// consistent with equals() above.
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasKeyId()) {
hash = (37 * hash) + KEYID_FIELD_NUMBER;
hash = (53 * hash) + getKeyId();
}
if (hasBlockPoolId()) {
hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
hash = (53 * hash) + getBlockPoolId().hashCode();
}
if (hasNonce()) {
hash = (37 * hash) + NONCE_FIELD_NUMBER;
hash = (53 * hash) + getNonce().hashCode();
}
if (hasEncryptionKey()) {
hash = (37 * hash) + ENCRYPTIONKEY_FIELD_NUMBER;
hash = (53 * hash) + getEncryptionKey().hashCode();
}
if (hasExpiryDate()) {
hash = (37 * hash) + EXPIRYDATE_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getExpiryDate());
}
if (hasEncryptionAlgorithm()) {
hash = (37 * hash) + ENCRYPTIONALGORITHM_FIELD_NUMBER;
hash = (53 * hash) + getEncryptionAlgorithm().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// Standard protoc parse entry points; all delegate to PARSER, covering
// ByteString / byte[] / InputStream / CodedInputStream sources, each with
// and without an extension registry, plus length-delimited stream forms.
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Builder factory helpers: a fresh builder, a builder seeded from an
// existing message (toBuilder / newBuilder(prototype)), and the internal
// parent-aware factory used for nested-builder wiring.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.DataEncryptionKeyProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
// No-op here: this message has no nested-message fields needing eager
// field-builder creation; the hook is kept for protoc uniformity.
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
// Resets every field to its proto default and clears all presence bits.
public Builder clear() {
super.clear();
keyId_ = 0;
bitField0_ = (bitField0_ & ~0x00000001);
blockPoolId_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
nonce_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000004);
encryptionKey_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000008);
expiryDate_ = 0L;
bitField0_ = (bitField0_ & ~0x00000010);
encryptionAlgorithm_ = "";
bitField0_ = (bitField0_ & ~0x00000020);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.getDefaultInstance();
}
// Unlike buildPartial(), build() rejects messages missing required fields.
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
// Copies builder state into a new message without checking required
// fields; field values are always copied, presence bits only when set.
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.keyId_ = keyId_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.blockPoolId_ = blockPoolId_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.nonce_ = nonce_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.encryptionKey_ = encryptionKey_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.expiryDate_ = expiryDate_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000020;
}
result.encryptionAlgorithm_ = encryptionAlgorithm_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
// Field-by-field merge: fields set in `other` overwrite this builder's values.
// String fields copy the raw Object (String or lazily-decoded ByteString) directly
// to avoid forcing UTF-8 decoding during the merge.
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.getDefaultInstance()) return this;
if (other.hasKeyId()) {
setKeyId(other.getKeyId());
}
if (other.hasBlockPoolId()) {
bitField0_ |= 0x00000002;
blockPoolId_ = other.blockPoolId_;
onChanged();
}
if (other.hasNonce()) {
setNonce(other.getNonce());
}
if (other.hasEncryptionKey()) {
setEncryptionKey(other.getEncryptionKey());
}
if (other.hasExpiryDate()) {
setExpiryDate(other.getExpiryDate());
}
if (other.hasEncryptionAlgorithm()) {
bitField0_ |= 0x00000020;
encryptionAlgorithm_ = other.encryptionAlgorithm_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
// True only when every `required` field of DataEncryptionKeyProto has been set.
// encryptionAlgorithm is optional and is intentionally not checked.
public final boolean isInitialized() {
if (!hasKeyId()) {
return false;
}
if (!hasBlockPoolId()) {
return false;
}
if (!hasNonce()) {
return false;
}
if (!hasEncryptionKey()) {
return false;
}
if (!hasExpiryDate()) {
return false;
}
return true;
}
// Parses a DataEncryptionKeyProto from the wire and merges it into this builder.
// On parse failure the partially-read message (if any) is still merged in the
// finally block before the exception propagates.
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// Presence bitmask for this builder; one bit per field in declaration order.
private int bitField0_;
// ---- field 1: keyId (required uint32) -- has/get/set/clear accessors ----
// required uint32 keyId = 1;
private int keyId_ ;
/**
* required uint32 keyId = 1;
*/
public boolean hasKeyId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint32 keyId = 1;
*/
public int getKeyId() {
return keyId_;
}
/**
* required uint32 keyId = 1;
*/
public Builder setKeyId(int value) {
bitField0_ |= 0x00000001;
keyId_ = value;
onChanged();
return this;
}
/**
* required uint32 keyId = 1;
*/
public Builder clearKeyId() {
bitField0_ = (bitField0_ & ~0x00000001);
keyId_ = 0;
onChanged();
return this;
}
// ---- field 2: blockPoolId (required string) ----
// Stored as Object: either a decoded String or a raw ByteString; decoding from
// UTF-8 is deferred until getBlockPoolId() is first called, then cached.
// required string blockPoolId = 2;
private java.lang.Object blockPoolId_ = "";
/**
* required string blockPoolId = 2;
*/
public boolean hasBlockPoolId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string blockPoolId = 2;
*/
public java.lang.String getBlockPoolId() {
java.lang.Object ref = blockPoolId_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
blockPoolId_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string blockPoolId = 2;
*/
public com.google.protobuf.ByteString
getBlockPoolIdBytes() {
java.lang.Object ref = blockPoolId_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPoolId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string blockPoolId = 2;
*/
public Builder setBlockPoolId(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
blockPoolId_ = value;
onChanged();
return this;
}
/**
* required string blockPoolId = 2;
*/
public Builder clearBlockPoolId() {
bitField0_ = (bitField0_ & ~0x00000002);
blockPoolId_ = getDefaultInstance().getBlockPoolId();
onChanged();
return this;
}
/**
* required string blockPoolId = 2;
*/
public Builder setBlockPoolIdBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
blockPoolId_ = value;
onChanged();
return this;
}
// ---- field 3: nonce (required bytes) -- immutable ByteString, never null ----
// required bytes nonce = 3;
private com.google.protobuf.ByteString nonce_ = com.google.protobuf.ByteString.EMPTY;
/**
* required bytes nonce = 3;
*/
public boolean hasNonce() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required bytes nonce = 3;
*/
public com.google.protobuf.ByteString getNonce() {
return nonce_;
}
/**
* required bytes nonce = 3;
*/
public Builder setNonce(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
nonce_ = value;
onChanged();
return this;
}
/**
* required bytes nonce = 3;
*/
public Builder clearNonce() {
bitField0_ = (bitField0_ & ~0x00000004);
nonce_ = getDefaultInstance().getNonce();
onChanged();
return this;
}
// ---- field 4: encryptionKey (required bytes) ----
// required bytes encryptionKey = 4;
private com.google.protobuf.ByteString encryptionKey_ = com.google.protobuf.ByteString.EMPTY;
/**
* required bytes encryptionKey = 4;
*/
public boolean hasEncryptionKey() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required bytes encryptionKey = 4;
*/
public com.google.protobuf.ByteString getEncryptionKey() {
return encryptionKey_;
}
/**
* required bytes encryptionKey = 4;
*/
public Builder setEncryptionKey(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
encryptionKey_ = value;
onChanged();
return this;
}
/**
* required bytes encryptionKey = 4;
*/
public Builder clearEncryptionKey() {
bitField0_ = (bitField0_ & ~0x00000008);
encryptionKey_ = getDefaultInstance().getEncryptionKey();
onChanged();
return this;
}
// ---- field 5: expiryDate (required uint64, mapped to Java long) ----
// NOTE(review): presumably an epoch timestamp; unit (ms vs s) not visible here -- confirm at call sites.
// required uint64 expiryDate = 5;
private long expiryDate_ ;
/**
* required uint64 expiryDate = 5;
*/
public boolean hasExpiryDate() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required uint64 expiryDate = 5;
*/
public long getExpiryDate() {
return expiryDate_;
}
/**
* required uint64 expiryDate = 5;
*/
public Builder setExpiryDate(long value) {
bitField0_ |= 0x00000010;
expiryDate_ = value;
onChanged();
return this;
}
/**
* required uint64 expiryDate = 5;
*/
public Builder clearExpiryDate() {
bitField0_ = (bitField0_ & ~0x00000010);
expiryDate_ = 0L;
onChanged();
return this;
}
// ---- field 6: encryptionAlgorithm (optional string) ----
// Same lazy String/ByteString dual-representation scheme as blockPoolId.
// optional string encryptionAlgorithm = 6;
private java.lang.Object encryptionAlgorithm_ = "";
/**
* optional string encryptionAlgorithm = 6;
*/
public boolean hasEncryptionAlgorithm() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional string encryptionAlgorithm = 6;
*/
public java.lang.String getEncryptionAlgorithm() {
java.lang.Object ref = encryptionAlgorithm_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
encryptionAlgorithm_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string encryptionAlgorithm = 6;
*/
public com.google.protobuf.ByteString
getEncryptionAlgorithmBytes() {
java.lang.Object ref = encryptionAlgorithm_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
encryptionAlgorithm_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* optional string encryptionAlgorithm = 6;
*/
public Builder setEncryptionAlgorithm(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000020;
encryptionAlgorithm_ = value;
onChanged();
return this;
}
/**
* optional string encryptionAlgorithm = 6;
*/
public Builder clearEncryptionAlgorithm() {
bitField0_ = (bitField0_ & ~0x00000020);
encryptionAlgorithm_ = getDefaultInstance().getEncryptionAlgorithm();
onChanged();
return this;
}
/**
* optional string encryptionAlgorithm = 6;
*/
public Builder setEncryptionAlgorithmBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000020;
encryptionAlgorithm_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DataEncryptionKeyProto)
}
// Eagerly creates the shared default instance of DataEncryptionKeyProto at class load.
static {
defaultInstance = new DataEncryptionKeyProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DataEncryptionKeyProto)
}
// Read-only accessor interface implemented by both FileEncryptionInfoProto and its
// Builder; mirrors the hadoop.hdfs.FileEncryptionInfoProto message (all six fields required).
public interface FileEncryptionInfoProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.CipherSuiteProto suite = 1;
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
boolean hasSuite();
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite();
// required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
/**
* required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
*/
boolean hasCryptoProtocolVersion();
/**
* required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion();
// required bytes key = 3;
/**
* required bytes key = 3;
*/
boolean hasKey();
/**
* required bytes key = 3;
*/
com.google.protobuf.ByteString getKey();
// required bytes iv = 4;
/**
* required bytes iv = 4;
*/
boolean hasIv();
/**
* required bytes iv = 4;
*/
com.google.protobuf.ByteString getIv();
// required string keyName = 5;
/**
* required string keyName = 5;
*/
boolean hasKeyName();
/**
* required string keyName = 5;
*/
java.lang.String getKeyName();
/**
* required string keyName = 5;
*/
com.google.protobuf.ByteString
getKeyNameBytes();
// required string ezKeyVersionName = 6;
/**
* required string ezKeyVersionName = 6;
*/
boolean hasEzKeyVersionName();
/**
* required string ezKeyVersionName = 6;
*/
java.lang.String getEzKeyVersionName();
/**
* required string ezKeyVersionName = 6;
*/
com.google.protobuf.ByteString
getEzKeyVersionNameBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.FileEncryptionInfoProto}
*
*
**
* Encryption information for a file.
*
*/
public static final class FileEncryptionInfoProto extends
com.google.protobuf.GeneratedMessage
implements FileEncryptionInfoProtoOrBuilder {
// Use FileEncryptionInfoProto.newBuilder() to construct.
// FIX(review): the wildcard type argument on Builder was stripped during HTML
// extraction ("Builder>"); restored to the generator's original "Builder<?>"
// so the constructor compiles.
private FileEncryptionInfoProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
// Used only to create the shared default instance; installs an empty unknown-field set.
private FileEncryptionInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
// Shared immutable default instance, created in the class's static initializer.
private static final FileEncryptionInfoProto defaultInstance;
public static FileEncryptionInfoProto getDefaultInstance() {
return defaultInstance;
}
public FileEncryptionInfoProto getDefaultInstanceForType() {
return defaultInstance;
}
// Fields that were on the wire but are not in this message's schema (forward compatibility).
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
// Wire-format parsing constructor: reads tag/value pairs until EOF (tag 0).
// Unknown tags -- and unrecognized enum numbers for fields 1 and 2 -- are
// preserved in the unknownFields set rather than dropped.
private FileEncryptionInfoProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
// End of stream.
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
// Field 1 (suite): unknown enum values go to unknownFields, not an error.
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
suite_ = value;
}
break;
}
case 16: {
// Field 2 (cryptoProtocolVersion): same unknown-enum handling.
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(2, rawValue);
} else {
bitField0_ |= 0x00000002;
cryptoProtocolVersion_ = value;
}
break;
}
case 26: {
bitField0_ |= 0x00000004;
key_ = input.readBytes();
break;
}
case 34: {
bitField0_ |= 0x00000008;
iv_ = input.readBytes();
break;
}
case 42: {
// String fields are stored as raw bytes; UTF-8 decoding is deferred to the getter.
bitField0_ |= 0x00000010;
keyName_ = input.readBytes();
break;
}
case 50: {
bitField0_ |= 0x00000020;
ezKeyVersionName_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
// Even on failure, freeze whatever was read so the unfinished message is usable.
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
// Descriptor plumbing linking this class to the hadoop.hdfs.FileEncryptionInfoProto
// message type for reflection-based access.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder.class);
}
// Shared stateless parser for FileEncryptionInfoProto wire data.
// FIX(review): the <FileEncryptionInfoProto> type arguments on Parser and
// AbstractParser were stripped during HTML extraction, leaving raw types;
// restored to match the protoc generator's output so getParserForType()
// correctly overrides the covariant GeneratedMessage contract.
public static com.google.protobuf.Parser<FileEncryptionInfoProto> PARSER =
new com.google.protobuf.AbstractParser<FileEncryptionInfoProto>() {
public FileEncryptionInfoProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new FileEncryptionInfoProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<FileEncryptionInfoProto> getParserForType() {
return PARSER;
}
// Presence bitmask for the immutable message; one bit per field in declaration order.
private int bitField0_;
// ---- field 1: suite (required enum CipherSuiteProto) ----
// required .hadoop.hdfs.CipherSuiteProto suite = 1;
public static final int SUITE_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto suite_;
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public boolean hasSuite() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() {
return suite_;
}
// ---- field 2: cryptoProtocolVersion (required enum CryptoProtocolVersionProto) ----
// required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
public static final int CRYPTOPROTOCOLVERSION_FIELD_NUMBER = 2;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto cryptoProtocolVersion_;
/**
* required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
*/
public boolean hasCryptoProtocolVersion() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion() {
return cryptoProtocolVersion_;
}
// ---- field 3: key (required bytes) -- the encrypted data-encryption key material ----
// NOTE(review): semantics inferred from message name only; confirm against hdfs.proto.
// required bytes key = 3;
public static final int KEY_FIELD_NUMBER = 3;
private com.google.protobuf.ByteString key_;
/**
* required bytes key = 3;
*/
public boolean hasKey() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required bytes key = 3;
*/
public com.google.protobuf.ByteString getKey() {
return key_;
}
// ---- field 4: iv (required bytes) ----
// required bytes iv = 4;
public static final int IV_FIELD_NUMBER = 4;
private com.google.protobuf.ByteString iv_;
/**
* required bytes iv = 4;
*/
public boolean hasIv() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required bytes iv = 4;
*/
public com.google.protobuf.ByteString getIv() {
return iv_;
}
// ---- field 5: keyName (required string) ----
// Stored as Object (String or ByteString); the decoded String is cached only
// when the bytes are valid UTF-8, matching generated-message lazy-decode rules.
// required string keyName = 5;
public static final int KEYNAME_FIELD_NUMBER = 5;
private java.lang.Object keyName_;
/**
* required string keyName = 5;
*/
public boolean hasKeyName() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required string keyName = 5;
*/
public java.lang.String getKeyName() {
java.lang.Object ref = keyName_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
keyName_ = s;
}
return s;
}
}
/**
* required string keyName = 5;
*/
public com.google.protobuf.ByteString
getKeyNameBytes() {
java.lang.Object ref = keyName_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
keyName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// ---- field 6: ezKeyVersionName (required string) -- same lazy-decode scheme as keyName ----
// required string ezKeyVersionName = 6;
public static final int EZKEYVERSIONNAME_FIELD_NUMBER = 6;
private java.lang.Object ezKeyVersionName_;
/**
* required string ezKeyVersionName = 6;
*/
public boolean hasEzKeyVersionName() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* required string ezKeyVersionName = 6;
*/
public java.lang.String getEzKeyVersionName() {
java.lang.Object ref = ezKeyVersionName_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
ezKeyVersionName_ = s;
}
return s;
}
}
/**
* required string ezKeyVersionName = 6;
*/
public com.google.protobuf.ByteString
getEzKeyVersionNameBytes() {
java.lang.Object ref = ezKeyVersionName_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
ezKeyVersionName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// Resets every field to its proto default; called before parsing and for the default instance.
private void initFields() {
suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN;
cryptoProtocolVersion_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION;
key_ = com.google.protobuf.ByteString.EMPTY;
iv_ = com.google.protobuf.ByteString.EMPTY;
keyName_ = "";
ezKeyVersionName_ = "";
}
// Memoized required-field check: -1 = not computed, 0 = missing fields, 1 = complete.
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
// All six fields are required; any missing one makes the message uninitialized.
if (!hasSuite()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasCryptoProtocolVersion()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasKey()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasIv()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasKeyName()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasEzKeyVersionName()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
// Serializes set fields (in field-number order) followed by any unknown fields.
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
// Called for its side effect of populating the memoized size used by the stream.
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, suite_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeEnum(2, cryptoProtocolVersion_.getNumber());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, key_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeBytes(4, iv_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeBytes(5, getKeyNameBytes());
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeBytes(6, getEzKeyVersionNameBytes());
}
getUnknownFields().writeTo(output);
}
// Computes (once, then memoizes) the encoded byte size of the set fields plus unknown fields.
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, suite_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(2, cryptoProtocolVersion_.getNumber());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, key_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(4, iv_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(5, getKeyNameBytes());
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(6, getEzKeyVersionNameBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
// Java serialization hook: delegates to GeneratedMessage's serialized proxy.
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
// Value equality: compares presence and value of every field plus unknown fields.
// Enum fields use == (enum constants are singletons); bytes/strings use equals().
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto) obj;
boolean result = true;
result = result && (hasSuite() == other.hasSuite());
if (hasSuite()) {
result = result &&
(getSuite() == other.getSuite());
}
result = result && (hasCryptoProtocolVersion() == other.hasCryptoProtocolVersion());
if (hasCryptoProtocolVersion()) {
result = result &&
(getCryptoProtocolVersion() == other.getCryptoProtocolVersion());
}
result = result && (hasKey() == other.hasKey());
if (hasKey()) {
result = result && getKey()
.equals(other.getKey());
}
result = result && (hasIv() == other.hasIv());
if (hasIv()) {
result = result && getIv()
.equals(other.getIv());
}
result = result && (hasKeyName() == other.hasKeyName());
if (hasKeyName()) {
result = result && getKeyName()
.equals(other.getKeyName());
}
result = result && (hasEzKeyVersionName() == other.hasEzKeyVersionName());
if (hasEzKeyVersionName()) {
result = result && getEzKeyVersionName()
.equals(other.getEzKeyVersionName());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
// Memoized hash consistent with equals(): folds in each set field's number and value.
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasSuite()) {
hash = (37 * hash) + SUITE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getSuite());
}
if (hasCryptoProtocolVersion()) {
hash = (37 * hash) + CRYPTOPROTOCOLVERSION_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getCryptoProtocolVersion());
}
if (hasKey()) {
hash = (37 * hash) + KEY_FIELD_NUMBER;
hash = (53 * hash) + getKey().hashCode();
}
if (hasIv()) {
hash = (37 * hash) + IV_FIELD_NUMBER;
hash = (53 * hash) + getIv().hashCode();
}
if (hasKeyName()) {
hash = (37 * hash) + KEYNAME_FIELD_NUMBER;
hash = (53 * hash) + getKeyName().hashCode();
}
if (hasEzKeyVersionName()) {
hash = (37 * hash) + EZKEYVERSIONNAME_FIELD_NUMBER;
hash = (53 * hash) + getEzKeyVersionName().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// Static parse factories -- all delegate to PARSER. The ByteString/byte[] overloads
// throw InvalidProtocolBufferException on malformed input; stream overloads throw
// IOException; parseDelimitedFrom expects a varint length prefix before the message.
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Builder factories: fresh builder, builder pre-populated from a prototype,
// and a parented builder used internally for nested-builder change propagation.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.FileEncryptionInfoProto}
*
*
**
* Encryption information for a file.
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder {
// Builder-side descriptor plumbing; identical wiring to the message class's.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
// Parented variant: changes propagate to the enclosing builder via onChanged().
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
// No nested-message fields here, so nothing to eagerly initialize.
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
// Resets every field to its proto default and clears all presence bits.
public Builder clear() {
super.clear();
suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN;
bitField0_ = (bitField0_ & ~0x00000001);
cryptoProtocolVersion_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION;
bitField0_ = (bitField0_ & ~0x00000002);
key_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000004);
iv_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000008);
keyName_ = "";
bitField0_ = (bitField0_ & ~0x00000010);
ezKeyVersionName_ = "";
bitField0_ = (bitField0_ & ~0x00000020);
return this;
}
// Clones via build-then-merge (standard generated-builder idiom), plus descriptor
// and default-instance accessors.
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance();
}
// Builds the message, throwing UninitializedMessageException (unchecked) if any
// of the six required fields is unset.
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
// Builds without the required-field check: copies values and translates the
// builder's has-bits into the message's bitField0_.
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.suite_ = suite_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.cryptoProtocolVersion_ = cryptoProtocolVersion_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.key_ = key_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.iv_ = iv_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.keyName_ = keyName_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000020;
}
result.ezKeyVersionName_ = ezKeyVersionName_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
// Type-dispatching merge: typed fast path for FileEncryptionInfoProto, reflective fallback otherwise.
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
// Field-by-field merge; string fields copy the raw Object to defer UTF-8 decoding.
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance()) return this;
if (other.hasSuite()) {
setSuite(other.getSuite());
}
if (other.hasCryptoProtocolVersion()) {
setCryptoProtocolVersion(other.getCryptoProtocolVersion());
}
if (other.hasKey()) {
setKey(other.getKey());
}
if (other.hasIv()) {
setIv(other.getIv());
}
if (other.hasKeyName()) {
bitField0_ |= 0x00000010;
keyName_ = other.keyName_;
onChanged();
}
if (other.hasEzKeyVersionName()) {
bitField0_ |= 0x00000020;
ezKeyVersionName_ = other.ezKeyVersionName_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
// True only when all six required fields have been set on this builder.
public final boolean isInitialized() {
if (!hasSuite()) {
return false;
}
if (!hasCryptoProtocolVersion()) {
return false;
}
if (!hasKey()) {
return false;
}
if (!hasIv()) {
return false;
}
if (!hasKeyName()) {
return false;
}
if (!hasEzKeyVersionName()) {
return false;
}
return true;
}
// Parses from the wire and merges; the finally block preserves any partially-read
// message even when parsing fails mid-stream.
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// Presence bitmask for this builder; one bit per field in declaration order.
private int bitField0_;
// ---- builder field 1: suite (required enum CipherSuiteProto) ----
// required .hadoop.hdfs.CipherSuiteProto suite = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN;
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public boolean hasSuite() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() {
return suite_;
}
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public Builder setSuite(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
suite_ = value;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public Builder clearSuite() {
bitField0_ = (bitField0_ & ~0x00000001);
suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN;
onChanged();
return this;
}
// required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto cryptoProtocolVersion_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION;
/**
* required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
*/
public boolean hasCryptoProtocolVersion() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion() {
return cryptoProtocolVersion_;
}
/**
* required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
*/
public Builder setCryptoProtocolVersion(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
cryptoProtocolVersion_ = value;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
*/
public Builder clearCryptoProtocolVersion() {
bitField0_ = (bitField0_ & ~0x00000002);
cryptoProtocolVersion_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION;
onChanged();
return this;
}
// required bytes key = 3;
private com.google.protobuf.ByteString key_ = com.google.protobuf.ByteString.EMPTY;
/**
* required bytes key = 3;
*/
public boolean hasKey() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required bytes key = 3;
*/
public com.google.protobuf.ByteString getKey() {
return key_;
}
/**
* required bytes key = 3;
*/
public Builder setKey(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
key_ = value;
onChanged();
return this;
}
/**
* required bytes key = 3;
*/
public Builder clearKey() {
bitField0_ = (bitField0_ & ~0x00000004);
key_ = getDefaultInstance().getKey();
onChanged();
return this;
}
// required bytes iv = 4;
private com.google.protobuf.ByteString iv_ = com.google.protobuf.ByteString.EMPTY;
/**
* required bytes iv = 4;
*/
public boolean hasIv() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required bytes iv = 4;
*/
public com.google.protobuf.ByteString getIv() {
return iv_;
}
/**
* required bytes iv = 4;
*/
public Builder setIv(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
iv_ = value;
onChanged();
return this;
}
/**
* required bytes iv = 4;
*/
public Builder clearIv() {
bitField0_ = (bitField0_ & ~0x00000008);
iv_ = getDefaultInstance().getIv();
onChanged();
return this;
}
// required string keyName = 5;
// String fields keep a dual representation: java.lang.String once decoded,
// com.google.protobuf.ByteString straight off the wire.
private java.lang.Object keyName_ = "";
/**
* required string keyName = 5;
*/
public boolean hasKeyName() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required string keyName = 5;
* Decodes lazily and caches the String form back into keyName_.
*/
public java.lang.String getKeyName() {
java.lang.Object ref = keyName_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
keyName_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string keyName = 5;
* Encodes lazily and caches the ByteString form back into keyName_.
*/
public com.google.protobuf.ByteString
getKeyNameBytes() {
java.lang.Object ref = keyName_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
keyName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string keyName = 5;
*/
public Builder setKeyName(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000010;
keyName_ = value;
onChanged();
return this;
}
/**
* required string keyName = 5;
*/
public Builder clearKeyName() {
bitField0_ = (bitField0_ & ~0x00000010);
keyName_ = getDefaultInstance().getKeyName();
onChanged();
return this;
}
/**
* required string keyName = 5;
* Caller-supplied bytes are stored as-is; no UTF-8 validation is performed.
*/
public Builder setKeyNameBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000010;
keyName_ = value;
onChanged();
return this;
}
// required string ezKeyVersionName = 6;
private java.lang.Object ezKeyVersionName_ = "";
/**
* required string ezKeyVersionName = 6;
*/
public boolean hasEzKeyVersionName() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* required string ezKeyVersionName = 6;
*/
public java.lang.String getEzKeyVersionName() {
java.lang.Object ref = ezKeyVersionName_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
ezKeyVersionName_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string ezKeyVersionName = 6;
*/
public com.google.protobuf.ByteString
getEzKeyVersionNameBytes() {
java.lang.Object ref = ezKeyVersionName_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
ezKeyVersionName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string ezKeyVersionName = 6;
*/
public Builder setEzKeyVersionName(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000020;
ezKeyVersionName_ = value;
onChanged();
return this;
}
/**
* required string ezKeyVersionName = 6;
*/
public Builder clearEzKeyVersionName() {
bitField0_ = (bitField0_ & ~0x00000020);
ezKeyVersionName_ = getDefaultInstance().getEzKeyVersionName();
onChanged();
return this;
}
/**
* required string ezKeyVersionName = 6;
*/
public Builder setEzKeyVersionNameBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000020;
ezKeyVersionName_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.FileEncryptionInfoProto)
}
// Eagerly builds the type's shared default instance via the no-init
// constructor, then populates field defaults with initFields().
static {
defaultInstance = new FileEncryptionInfoProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.FileEncryptionInfoProto)
}
// Read-only accessor interface implemented by both PerFileEncryptionInfoProto
// and its Builder.
public interface PerFileEncryptionInfoProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required bytes key = 1;
/**
* required bytes key = 1;
*/
boolean hasKey();
/**
* required bytes key = 1;
*/
com.google.protobuf.ByteString getKey();
// required bytes iv = 2;
/**
* required bytes iv = 2;
*/
boolean hasIv();
/**
* required bytes iv = 2;
*/
com.google.protobuf.ByteString getIv();
// required string ezKeyVersionName = 3;
/**
* required string ezKeyVersionName = 3;
*/
boolean hasEzKeyVersionName();
/**
* required string ezKeyVersionName = 3;
*/
java.lang.String getEzKeyVersionName();
/**
* required string ezKeyVersionName = 3;
*/
com.google.protobuf.ByteString
getEzKeyVersionNameBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.PerFileEncryptionInfoProto}
*
*
**
* Encryption information for an individual
* file within an encryption zone
*
*/
public static final class PerFileEncryptionInfoProto extends
com.google.protobuf.GeneratedMessage
implements PerFileEncryptionInfoProtoOrBuilder {
// Use PerFileEncryptionInfoProto.newBuilder() to construct.
// NOTE: the wildcard type argument below was stripped by the HTML scrape
// ("Builder>"); restored to the canonical protoc form "Builder<?>".
private PerFileEncryptionInfoProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
  super(builder);
  this.unknownFields = builder.getUnknownFields();
}
// Bootstrap constructor used only for the shared default instance.
private PerFileEncryptionInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
// Shared immutable default instance, assigned in the static initializer.
private static final PerFileEncryptionInfoProto defaultInstance;
public static PerFileEncryptionInfoProto getDefaultInstance() {
return defaultInstance;
}
public PerFileEncryptionInfoProto getDefaultInstanceForType() {
return defaultInstance;
}
// Fields that arrived with unrecognized tag numbers are preserved here.
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
// Wire-format parsing constructor: reads tag/value pairs until EOF (tag 0)
// or an unskippable unknown field.  Placing "default" before the value cases
// is legal Java; cases are matched by value, not position.
private PerFileEncryptionInfoProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
key_ = input.readBytes();
break;
}
case 18: {
bitField0_ |= 0x00000002;
iv_ = input.readBytes();
break;
}
case 26: {
// Stored as ByteString; decoded lazily by getEzKeyVersionName().
bitField0_ |= 0x00000004;
ezKeyVersionName_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
// Always freeze what was read, even on failure, so the unfinished
// message attached to the exception is usable.
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
// Reflection support: descriptor and field-accessor table for this type.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.Builder.class);
}
/**
 * Parser for {@code hadoop.hdfs.PerFileEncryptionInfoProto}.
 * The {@code <PerFileEncryptionInfoProto>} type arguments were stripped by
 * the HTML scrape (leaving raw {@code Parser}/{@code AbstractParser}, which
 * does not compile against the generated call sites); restored here.
 */
public static com.google.protobuf.Parser<PerFileEncryptionInfoProto> PARSER =
    new com.google.protobuf.AbstractParser<PerFileEncryptionInfoProto>() {
  public PerFileEncryptionInfoProto parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return new PerFileEncryptionInfoProto(input, extensionRegistry);
  }
};
@java.lang.Override
public com.google.protobuf.Parser<PerFileEncryptionInfoProto> getParserForType() {
  return PARSER;
}
// Message has-bits: 0x01 key, 0x02 iv, 0x04 ezKeyVersionName.
private int bitField0_;
// required bytes key = 1;
public static final int KEY_FIELD_NUMBER = 1;
private com.google.protobuf.ByteString key_;
/**
* required bytes key = 1;
*/
public boolean hasKey() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required bytes key = 1;
*/
public com.google.protobuf.ByteString getKey() {
return key_;
}
// required bytes iv = 2;
public static final int IV_FIELD_NUMBER = 2;
private com.google.protobuf.ByteString iv_;
/**
* required bytes iv = 2;
*/
public boolean hasIv() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required bytes iv = 2;
*/
public com.google.protobuf.ByteString getIv() {
return iv_;
}
// required string ezKeyVersionName = 3;
public static final int EZKEYVERSIONNAME_FIELD_NUMBER = 3;
// Dual representation: ByteString off the wire, String once decoded.
private java.lang.Object ezKeyVersionName_;
/**
* required string ezKeyVersionName = 3;
*/
public boolean hasEzKeyVersionName() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required string ezKeyVersionName = 3;
* Lazily decodes; caches the String only when the bytes are valid UTF-8.
*/
public java.lang.String getEzKeyVersionName() {
java.lang.Object ref = ezKeyVersionName_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
ezKeyVersionName_ = s;
}
return s;
}
}
/**
* required string ezKeyVersionName = 3;
*/
public com.google.protobuf.ByteString
getEzKeyVersionNameBytes() {
java.lang.Object ref = ezKeyVersionName_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
ezKeyVersionName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// Sets each field to its proto2 default value.
private void initFields() {
key_ = com.google.protobuf.ByteString.EMPTY;
iv_ = com.google.protobuf.ByteString.EMPTY;
ezKeyVersionName_ = "";
}
// -1 = not computed, 0 = not initialized, 1 = initialized.
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasKey()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasIv()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasEzKeyVersionName()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
// Serializes only the fields whose has-bits are set, in field-number order,
// then any unknown fields.  getSerializedSize() is called first so the size
// is memoized before writing.
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, key_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, iv_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, getEzKeyVersionNameBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
// Computes (and memoizes) the serialized byte length of this message.
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, key_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, iv_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, getEzKeyVersionNameBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
// Value equality: same has-bits, same field values, same unknown fields.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto) obj;
boolean result = true;
result = result && (hasKey() == other.hasKey());
if (hasKey()) {
result = result && getKey()
.equals(other.getKey());
}
result = result && (hasIv() == other.hasIv());
if (hasIv()) {
result = result && getIv()
.equals(other.getIv());
}
result = result && (hasEzKeyVersionName() == other.hasEzKeyVersionName());
if (hasEzKeyVersionName()) {
result = result && getEzKeyVersionName()
.equals(other.getEzKeyVersionName());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
// Memoized; 0 doubles as the "not yet computed" sentinel.
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasKey()) {
hash = (37 * hash) + KEY_FIELD_NUMBER;
hash = (53 * hash) + getKey().hashCode();
}
if (hasIv()) {
hash = (37 * hash) + IV_FIELD_NUMBER;
hash = (53 * hash) + getIv().hashCode();
}
if (hasEzKeyVersionName()) {
hash = (37 * hash) + EZKEYVERSIONNAME_FIELD_NUMBER;
hash = (53 * hash) + getEzKeyVersionName().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// Standard generated parse entry points; all delegate to PARSER.
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Delimited variants read a varint length prefix before the message bytes.
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Builder factory methods.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
// Returns a builder pre-populated from {@code prototype}.
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Protobuf type {@code hadoop.hdfs.PerFileEncryptionInfoProto}
 *
 * <pre>
 * Encryption information for an individual
 * file within an encryption zone
 * </pre>
 */
public static final class Builder extends
    // The <Builder> type argument was stripped by the HTML scrape, leaving a
    // raw supertype; restored to the canonical protoc-generated form.
    com.google.protobuf.GeneratedMessage.Builder<Builder>
   implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProtoOrBuilder {
  public static final com.google.protobuf.Descriptors.Descriptor
      getDescriptor() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor;
  }
  protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.Builder.class);
  }
  // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.newBuilder()
  private Builder() {
    maybeForceBuilderInitialization();
  }
  private Builder(
      com.google.protobuf.GeneratedMessage.BuilderParent parent) {
    super(parent);
    maybeForceBuilderInitialization();
  }
  // No message-typed fields here, so there are no sub-builders to create.
  private void maybeForceBuilderInitialization() {
    if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
    }
  }
  private static Builder create() {
    return new Builder();
  }
  /** Resets every field to its proto2 default and clears all has-bits. */
  public Builder clear() {
    super.clear();
    key_ = com.google.protobuf.ByteString.EMPTY;
    bitField0_ = (bitField0_ & ~0x00000001);
    iv_ = com.google.protobuf.ByteString.EMPTY;
    bitField0_ = (bitField0_ & ~0x00000002);
    ezKeyVersionName_ = "";
    bitField0_ = (bitField0_ & ~0x00000004);
    return this;
  }
  public Builder clone() {
    return create().mergeFrom(buildPartial());
  }
  public com.google.protobuf.Descriptors.Descriptor
      getDescriptorForType() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor;
  }
  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto getDefaultInstanceForType() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.getDefaultInstance();
  }
  /** Builds the message, throwing if any required field is unset. */
  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto build() {
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto result = buildPartial();
    if (!result.isInitialized()) {
      throw newUninitializedMessageException(result);
    }
    return result;
  }
  /** Builds the message without enforcing required-field initialization. */
  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto buildPartial() {
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto(this);
    int from_bitField0_ = bitField0_;
    int to_bitField0_ = 0;
    if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
      to_bitField0_ |= 0x00000001;
    }
    result.key_ = key_;
    if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
      to_bitField0_ |= 0x00000002;
    }
    result.iv_ = iv_;
    if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
      to_bitField0_ |= 0x00000004;
    }
    result.ezKeyVersionName_ = ezKeyVersionName_;
    result.bitField0_ = to_bitField0_;
    onBuilt();
    return result;
  }
  public Builder mergeFrom(com.google.protobuf.Message other) {
    if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto) {
      return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto)other);
    } else {
      super.mergeFrom(other);
      return this;
    }
  }
  /** Copies every set field of {@code other} into this builder. */
  public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto other) {
    if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.getDefaultInstance()) return this;
    if (other.hasKey()) {
      setKey(other.getKey());
    }
    if (other.hasIv()) {
      setIv(other.getIv());
    }
    if (other.hasEzKeyVersionName()) {
      // Copy the raw String/ByteString form; 0x04 is the has-bit.
      bitField0_ |= 0x00000004;
      ezKeyVersionName_ = other.ezKeyVersionName_;
      onChanged();
    }
    this.mergeUnknownFields(other.getUnknownFields());
    return this;
  }
  public final boolean isInitialized() {
    if (!hasKey()) {
      return false;
    }
    if (!hasIv()) {
      return false;
    }
    if (!hasEzKeyVersionName()) {
      return false;
    }
    return true;
  }
  public Builder mergeFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parsedMessage = null;
    try {
      parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      // Keep whatever was parsed so the finally block can merge it.
      parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto) e.getUnfinishedMessage();
      throw e;
    } finally {
      if (parsedMessage != null) {
        mergeFrom(parsedMessage);
      }
    }
    return this;
  }
  // Has-bits: 0x01 key, 0x02 iv, 0x04 ezKeyVersionName.
  private int bitField0_;
  // required bytes key = 1;
  private com.google.protobuf.ByteString key_ = com.google.protobuf.ByteString.EMPTY;
  /**
   * {@code required bytes key = 1;}
   */
  public boolean hasKey() {
    return ((bitField0_ & 0x00000001) == 0x00000001);
  }
  /**
   * {@code required bytes key = 1;}
   */
  public com.google.protobuf.ByteString getKey() {
    return key_;
  }
  /**
   * {@code required bytes key = 1;}
   */
  public Builder setKey(com.google.protobuf.ByteString value) {
    if (value == null) {
      throw new NullPointerException();
    }
    bitField0_ |= 0x00000001;
    key_ = value;
    onChanged();
    return this;
  }
  /**
   * {@code required bytes key = 1;}
   */
  public Builder clearKey() {
    bitField0_ = (bitField0_ & ~0x00000001);
    key_ = getDefaultInstance().getKey();
    onChanged();
    return this;
  }
  // required bytes iv = 2;
  private com.google.protobuf.ByteString iv_ = com.google.protobuf.ByteString.EMPTY;
  /**
   * {@code required bytes iv = 2;}
   */
  public boolean hasIv() {
    return ((bitField0_ & 0x00000002) == 0x00000002);
  }
  /**
   * {@code required bytes iv = 2;}
   */
  public com.google.protobuf.ByteString getIv() {
    return iv_;
  }
  /**
   * {@code required bytes iv = 2;}
   */
  public Builder setIv(com.google.protobuf.ByteString value) {
    if (value == null) {
      throw new NullPointerException();
    }
    bitField0_ |= 0x00000002;
    iv_ = value;
    onChanged();
    return this;
  }
  /**
   * {@code required bytes iv = 2;}
   */
  public Builder clearIv() {
    bitField0_ = (bitField0_ & ~0x00000002);
    iv_ = getDefaultInstance().getIv();
    onChanged();
    return this;
  }
  // required string ezKeyVersionName = 3;
  // Dual representation: String once decoded, ByteString off the wire.
  private java.lang.Object ezKeyVersionName_ = "";
  /**
   * {@code required string ezKeyVersionName = 3;}
   */
  public boolean hasEzKeyVersionName() {
    return ((bitField0_ & 0x00000004) == 0x00000004);
  }
  /**
   * {@code required string ezKeyVersionName = 3;}
   * Lazily decodes and caches the String form.
   */
  public java.lang.String getEzKeyVersionName() {
    java.lang.Object ref = ezKeyVersionName_;
    if (!(ref instanceof java.lang.String)) {
      java.lang.String s = ((com.google.protobuf.ByteString) ref)
          .toStringUtf8();
      ezKeyVersionName_ = s;
      return s;
    } else {
      return (java.lang.String) ref;
    }
  }
  /**
   * {@code required string ezKeyVersionName = 3;}
   * Lazily encodes and caches the ByteString form.
   */
  public com.google.protobuf.ByteString
      getEzKeyVersionNameBytes() {
    java.lang.Object ref = ezKeyVersionName_;
    if (ref instanceof String) {
      com.google.protobuf.ByteString b =
          com.google.protobuf.ByteString.copyFromUtf8(
              (java.lang.String) ref);
      ezKeyVersionName_ = b;
      return b;
    } else {
      return (com.google.protobuf.ByteString) ref;
    }
  }
  /**
   * {@code required string ezKeyVersionName = 3;}
   */
  public Builder setEzKeyVersionName(
      java.lang.String value) {
    if (value == null) {
      throw new NullPointerException();
    }
    bitField0_ |= 0x00000004;
    ezKeyVersionName_ = value;
    onChanged();
    return this;
  }
  /**
   * {@code required string ezKeyVersionName = 3;}
   */
  public Builder clearEzKeyVersionName() {
    bitField0_ = (bitField0_ & ~0x00000004);
    ezKeyVersionName_ = getDefaultInstance().getEzKeyVersionName();
    onChanged();
    return this;
  }
  /**
   * {@code required string ezKeyVersionName = 3;}
   * Bytes are stored as-is; no UTF-8 validation is performed here.
   */
  public Builder setEzKeyVersionNameBytes(
      com.google.protobuf.ByteString value) {
    if (value == null) {
      throw new NullPointerException();
    }
    bitField0_ |= 0x00000004;
    ezKeyVersionName_ = value;
    onChanged();
    return this;
  }
  // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PerFileEncryptionInfoProto)
}
// Eagerly builds the type's shared default instance.
static {
defaultInstance = new PerFileEncryptionInfoProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.PerFileEncryptionInfoProto)
}
// Read-only accessor interface implemented by both ZoneEncryptionInfoProto
// and its Builder.
public interface ZoneEncryptionInfoProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.CipherSuiteProto suite = 1;
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
boolean hasSuite();
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite();
// required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
/**
* required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
*/
boolean hasCryptoProtocolVersion();
/**
* required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion();
// required string keyName = 3;
/**
* required string keyName = 3;
*/
boolean hasKeyName();
/**
* required string keyName = 3;
*/
java.lang.String getKeyName();
/**
* required string keyName = 3;
*/
com.google.protobuf.ByteString
getKeyNameBytes();
// optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;
/**
* optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;
*/
boolean hasReencryptionProto();
/**
* optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto getReencryptionProto();
/**
* optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProtoOrBuilder getReencryptionProtoOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.ZoneEncryptionInfoProto}
*
*
**
* Encryption information for an encryption
* zone
*
*/
public static final class ZoneEncryptionInfoProto extends
com.google.protobuf.GeneratedMessage
implements ZoneEncryptionInfoProtoOrBuilder {
// Use ZoneEncryptionInfoProto.newBuilder() to construct.
// NOTE: the wildcard type argument below was stripped by the HTML scrape
// ("Builder>"); restored to the canonical protoc form "Builder<?>".
private ZoneEncryptionInfoProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
  super(builder);
  this.unknownFields = builder.getUnknownFields();
}
// Bootstrap constructor used only for the shared default instance.
private ZoneEncryptionInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
// Shared immutable default instance, assigned in the static initializer.
private static final ZoneEncryptionInfoProto defaultInstance;
public static ZoneEncryptionInfoProto getDefaultInstance() {
return defaultInstance;
}
public ZoneEncryptionInfoProto getDefaultInstanceForType() {
return defaultInstance;
}
// Fields that arrived with unrecognized tag numbers are preserved here.
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
// Wire-format parsing constructor.  Unknown enum numbers are preserved in
// unknownFields (proto2 behavior) rather than dropped.
private ZoneEncryptionInfoProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.valueOf(rawValue);
if (value == null) {
// Unrecognized enum number: keep it as an unknown varint field.
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
suite_ = value;
}
break;
}
case 16: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(2, rawValue);
} else {
bitField0_ |= 0x00000002;
cryptoProtocolVersion_ = value;
}
break;
}
case 26: {
bitField0_ |= 0x00000004;
keyName_ = input.readBytes();
break;
}
case 34: {
// Repeated occurrences of a singular message field merge together.
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000008) == 0x00000008)) {
subBuilder = reencryptionProto_.toBuilder();
}
reencryptionProto_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(reencryptionProto_);
reencryptionProto_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000008;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
// Returns the message descriptor for hadoop.hdfs.ZoneEncryptionInfoProto,
// resolved from the file-level descriptor table of the enclosing HdfsProtos
// outer class (populated in its static initializer).
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor;
}
// Wires reflection-based field access (used by GeneratedMessage internals)
// to this message class and its Builder.
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.Builder.class);
}
// NOTE(review): the generic type arguments on Parser/AbstractParser were
// stripped by the HTML export this file was recovered from; restored here so
// the declaration compiles exactly as protoc 2.5 emits it.
/**
 * Shared parser singleton backing every {@code parseFrom(...)} entry point
 * of ZoneEncryptionInfoProto.
 */
public static com.google.protobuf.Parser<ZoneEncryptionInfoProto> PARSER =
    new com.google.protobuf.AbstractParser<ZoneEncryptionInfoProto>() {
  public ZoneEncryptionInfoProto parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    // Delegates to the stream-parsing constructor of the message.
    return new ZoneEncryptionInfoProto(input, extensionRegistry);
  }
};
@java.lang.Override
public com.google.protobuf.Parser<ZoneEncryptionInfoProto> getParserForType() {
  return PARSER;
}
// Presence bitmask: bit 0 = suite, bit 1 = cryptoProtocolVersion,
// bit 2 = keyName, bit 3 = reencryptionProto.
private int bitField0_;
// required .hadoop.hdfs.CipherSuiteProto suite = 1;
public static final int SUITE_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto suite_;
/**
 * required .hadoop.hdfs.CipherSuiteProto suite = 1;
 */
public boolean hasSuite() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
 * required .hadoop.hdfs.CipherSuiteProto suite = 1;
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() {
return suite_;
}
// required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
public static final int CRYPTOPROTOCOLVERSION_FIELD_NUMBER = 2;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto cryptoProtocolVersion_;
/**
 * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
 */
public boolean hasCryptoProtocolVersion() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
 * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion() {
return cryptoProtocolVersion_;
}
// required string keyName = 3;
public static final int KEYNAME_FIELD_NUMBER = 3;
// Holds either a String or a ByteString; lazily converted in the accessors
// below (standard generated-code idiom to avoid UTF-8 decoding until needed).
private java.lang.Object keyName_;
/**
 * required string keyName = 3;
 */
public boolean hasKeyName() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
 * required string keyName = 3;
 */
public java.lang.String getKeyName() {
java.lang.Object ref = keyName_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
// Cache the decoded String only when the bytes are valid UTF-8, so a
// malformed payload is not silently memoized.
if (bs.isValidUtf8()) {
keyName_ = s;
}
return s;
}
}
/**
 * required string keyName = 3;
 */
public com.google.protobuf.ByteString
getKeyNameBytes() {
java.lang.Object ref = keyName_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
// Cache the encoded form for subsequent serializations.
keyName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;
public static final int REENCRYPTIONPROTO_FIELD_NUMBER = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto reencryptionProto_;
/**
 * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;
 */
public boolean hasReencryptionProto() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
 * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto getReencryptionProto() {
return reencryptionProto_;
}
/**
 * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProtoOrBuilder getReencryptionProtoOrBuilder() {
return reencryptionProto_;
}
// Resets every field to its proto default; called from the constructors
// before parsing so unset fields have well-defined values.
private void initFields() {
suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN;
cryptoProtocolVersion_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION;
keyName_ = "";
reencryptionProto_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.getDefaultInstance();
}
// -1 = not computed yet, 0 = not initialized, 1 = initialized.
private byte memoizedIsInitialized = -1;
// True iff all required fields (suite, cryptoProtocolVersion, keyName) are
// present and the optional reencryptionProto, when set, is itself initialized.
// The result is memoized since the message is immutable.
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasSuite()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasCryptoProtocolVersion()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasKeyName()) {
memoizedIsInitialized = 0;
return false;
}
if (hasReencryptionProto()) {
if (!getReencryptionProto().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
// Serializes only the fields whose presence bit is set, in field-number
// order, then appends any unknown fields preserved from parsing.
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
// Ensures memoizedSerializedSize is populated before writing (the output
// stream may rely on precomputed nested-message sizes).
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, suite_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeEnum(2, cryptoProtocolVersion_.getNumber());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, getKeyNameBytes());
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeMessage(4, reencryptionProto_);
}
getUnknownFields().writeTo(output);
}
// Cached wire size; -1 until first computed. Safe to memoize because the
// message is immutable after construction.
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, suite_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(2, cryptoProtocolVersion_.getNumber());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, getKeyNameBytes());
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(4, reencryptionProto_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
// Java serialization hook inherited from GeneratedMessage; delegates so the
// message serializes via its proto wire form rather than field reflection.
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
// Field-by-field equality: two messages are equal when each field has the
// same presence and, if present, the same value, and their unknown-field
// sets match. Enum fields are compared by reference (enum constants are
// singletons).
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto) obj;
boolean result = true;
result = result && (hasSuite() == other.hasSuite());
if (hasSuite()) {
result = result &&
(getSuite() == other.getSuite());
}
result = result && (hasCryptoProtocolVersion() == other.hasCryptoProtocolVersion());
if (hasCryptoProtocolVersion()) {
result = result &&
(getCryptoProtocolVersion() == other.getCryptoProtocolVersion());
}
result = result && (hasKeyName() == other.hasKeyName());
if (hasKeyName()) {
result = result && getKeyName()
.equals(other.getKeyName());
}
result = result && (hasReencryptionProto() == other.hasReencryptionProto());
if (hasReencryptionProto()) {
result = result && getReencryptionProto()
.equals(other.getReencryptionProto());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
// Cached hash; 0 means "not yet computed" (recomputed if the real hash is 0,
// which is harmless). Consistent with equals(): only present fields and the
// unknown-field set contribute.
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasSuite()) {
hash = (37 * hash) + SUITE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getSuite());
}
if (hasCryptoProtocolVersion()) {
hash = (37 * hash) + CRYPTOPROTOCOLVERSION_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getCryptoProtocolVersion());
}
if (hasKeyName()) {
hash = (37 * hash) + KEYNAME_FIELD_NUMBER;
hash = (53 * hash) + getKeyName().hashCode();
}
if (hasReencryptionProto()) {
hash = (37 * hash) + REENCRYPTIONPROTO_FIELD_NUMBER;
hash = (53 * hash) + getReencryptionProto().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// Static parse entry points for every supported input form (ByteString,
// byte[], InputStream, CodedInputStream; plus length-delimited variants),
// each with and without an extension registry. All delegate to PARSER.
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Builder factory methods: fresh builder, builder pre-populated from a
// prototype, and the instance-level toBuilder()/newBuilderForType() hooks.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Protobuf type {@code hadoop.hdfs.ZoneEncryptionInfoProto}
 *
 * Encryption information for an encryption zone.
 *
 * NOTE(review): the type argument on GeneratedMessage.Builder in the extends
 * clause was stripped by the HTML export this file was recovered from;
 * restored here so the class compiles as protoc 2.5 originally emitted it.
 * All other code is unchanged.
 */
public static final class Builder extends
    com.google.protobuf.GeneratedMessage.Builder<Builder>
   implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProtoOrBuilder {
  public static final com.google.protobuf.Descriptors.Descriptor
      getDescriptor() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor;
  }
  protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.Builder.class);
  }
  // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.newBuilder()
  private Builder() {
    maybeForceBuilderInitialization();
  }
  private Builder(
      com.google.protobuf.GeneratedMessage.BuilderParent parent) {
    super(parent);
    maybeForceBuilderInitialization();
  }
  // Eagerly creates nested-field builders only when the runtime requires it
  // (alwaysUseFieldBuilders is set in tests/descriptor mode).
  private void maybeForceBuilderInitialization() {
    if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
      getReencryptionProtoFieldBuilder();
    }
  }
  private static Builder create() {
    return new Builder();
  }
  // Resets every field to its proto default and clears all presence bits.
  public Builder clear() {
    super.clear();
    suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN;
    bitField0_ = (bitField0_ & ~0x00000001);
    cryptoProtocolVersion_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION;
    bitField0_ = (bitField0_ & ~0x00000002);
    keyName_ = "";
    bitField0_ = (bitField0_ & ~0x00000004);
    if (reencryptionProtoBuilder_ == null) {
      reencryptionProto_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.getDefaultInstance();
    } else {
      reencryptionProtoBuilder_.clear();
    }
    bitField0_ = (bitField0_ & ~0x00000008);
    return this;
  }
  public Builder clone() {
    return create().mergeFrom(buildPartial());
  }
  public com.google.protobuf.Descriptors.Descriptor
      getDescriptorForType() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor;
  }
  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto getDefaultInstanceForType() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.getDefaultInstance();
  }
  // Builds and verifies all required fields are set; throws otherwise.
  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto build() {
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto result = buildPartial();
    if (!result.isInitialized()) {
      throw newUninitializedMessageException(result);
    }
    return result;
  }
  // Builds without the required-field check, copying presence bits and
  // current field values (or the nested builder's built message) into the
  // immutable result.
  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto buildPartial() {
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto(this);
    int from_bitField0_ = bitField0_;
    int to_bitField0_ = 0;
    if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
      to_bitField0_ |= 0x00000001;
    }
    result.suite_ = suite_;
    if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
      to_bitField0_ |= 0x00000002;
    }
    result.cryptoProtocolVersion_ = cryptoProtocolVersion_;
    if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
      to_bitField0_ |= 0x00000004;
    }
    result.keyName_ = keyName_;
    if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
      to_bitField0_ |= 0x00000008;
    }
    if (reencryptionProtoBuilder_ == null) {
      result.reencryptionProto_ = reencryptionProto_;
    } else {
      result.reencryptionProto_ = reencryptionProtoBuilder_.build();
    }
    result.bitField0_ = to_bitField0_;
    onBuilt();
    return result;
  }
  public Builder mergeFrom(com.google.protobuf.Message other) {
    if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto) {
      return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto)other);
    } else {
      super.mergeFrom(other);
      return this;
    }
  }
  // Copies every field that is present on `other` into this builder;
  // singular fields overwrite, the nested message field merges.
  public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto other) {
    if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.getDefaultInstance()) return this;
    if (other.hasSuite()) {
      setSuite(other.getSuite());
    }
    if (other.hasCryptoProtocolVersion()) {
      setCryptoProtocolVersion(other.getCryptoProtocolVersion());
    }
    if (other.hasKeyName()) {
      bitField0_ |= 0x00000004;
      // Shares other's String/ByteString reference directly (immutable).
      keyName_ = other.keyName_;
      onChanged();
    }
    if (other.hasReencryptionProto()) {
      mergeReencryptionProto(other.getReencryptionProto());
    }
    this.mergeUnknownFields(other.getUnknownFields());
    return this;
  }
  public final boolean isInitialized() {
    if (!hasSuite()) {
      return false;
    }
    if (!hasCryptoProtocolVersion()) {
      return false;
    }
    if (!hasKeyName()) {
      return false;
    }
    if (hasReencryptionProto()) {
      if (!getReencryptionProto().isInitialized()) {
        return false;
      }
    }
    return true;
  }
  // Stream merge: even on parse failure, whatever was successfully parsed
  // is merged in (via the finally block) before the exception propagates.
  public Builder mergeFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parsedMessage = null;
    try {
      parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto) e.getUnfinishedMessage();
      throw e;
    } finally {
      if (parsedMessage != null) {
        mergeFrom(parsedMessage);
      }
    }
    return this;
  }
  // Presence bits mirror the message class: bit 0 = suite,
  // bit 1 = cryptoProtocolVersion, bit 2 = keyName, bit 3 = reencryptionProto.
  private int bitField0_;
  // required .hadoop.hdfs.CipherSuiteProto suite = 1;
  private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN;
  /**
   * required .hadoop.hdfs.CipherSuiteProto suite = 1;
   */
  public boolean hasSuite() {
    return ((bitField0_ & 0x00000001) == 0x00000001);
  }
  /**
   * required .hadoop.hdfs.CipherSuiteProto suite = 1;
   */
  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() {
    return suite_;
  }
  /**
   * required .hadoop.hdfs.CipherSuiteProto suite = 1;
   */
  public Builder setSuite(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value) {
    if (value == null) {
      throw new NullPointerException();
    }
    bitField0_ |= 0x00000001;
    suite_ = value;
    onChanged();
    return this;
  }
  /**
   * required .hadoop.hdfs.CipherSuiteProto suite = 1;
   */
  public Builder clearSuite() {
    bitField0_ = (bitField0_ & ~0x00000001);
    suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN;
    onChanged();
    return this;
  }
  // required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
  private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto cryptoProtocolVersion_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION;
  /**
   * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
   */
  public boolean hasCryptoProtocolVersion() {
    return ((bitField0_ & 0x00000002) == 0x00000002);
  }
  /**
   * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
   */
  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion() {
    return cryptoProtocolVersion_;
  }
  /**
   * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
   */
  public Builder setCryptoProtocolVersion(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto value) {
    if (value == null) {
      throw new NullPointerException();
    }
    bitField0_ |= 0x00000002;
    cryptoProtocolVersion_ = value;
    onChanged();
    return this;
  }
  /**
   * required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;
   */
  public Builder clearCryptoProtocolVersion() {
    bitField0_ = (bitField0_ & ~0x00000002);
    cryptoProtocolVersion_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION;
    onChanged();
    return this;
  }
  // required string keyName = 3;
  // Holds either a String or a ByteString, lazily converted (same idiom as
  // the message class).
  private java.lang.Object keyName_ = "";
  /**
   * required string keyName = 3;
   */
  public boolean hasKeyName() {
    return ((bitField0_ & 0x00000004) == 0x00000004);
  }
  /**
   * required string keyName = 3;
   */
  public java.lang.String getKeyName() {
    java.lang.Object ref = keyName_;
    if (!(ref instanceof java.lang.String)) {
      java.lang.String s = ((com.google.protobuf.ByteString) ref)
          .toStringUtf8();
      keyName_ = s;
      return s;
    } else {
      return (java.lang.String) ref;
    }
  }
  /**
   * required string keyName = 3;
   */
  public com.google.protobuf.ByteString
      getKeyNameBytes() {
    java.lang.Object ref = keyName_;
    if (ref instanceof String) {
      com.google.protobuf.ByteString b =
          com.google.protobuf.ByteString.copyFromUtf8(
              (java.lang.String) ref);
      keyName_ = b;
      return b;
    } else {
      return (com.google.protobuf.ByteString) ref;
    }
  }
  /**
   * required string keyName = 3;
   */
  public Builder setKeyName(
      java.lang.String value) {
    if (value == null) {
      throw new NullPointerException();
    }
    bitField0_ |= 0x00000004;
    keyName_ = value;
    onChanged();
    return this;
  }
  /**
   * required string keyName = 3;
   */
  public Builder clearKeyName() {
    bitField0_ = (bitField0_ & ~0x00000004);
    keyName_ = getDefaultInstance().getKeyName();
    onChanged();
    return this;
  }
  /**
   * required string keyName = 3;
   */
  public Builder setKeyNameBytes(
      com.google.protobuf.ByteString value) {
    if (value == null) {
      throw new NullPointerException();
    }
    bitField0_ |= 0x00000004;
    keyName_ = value;
    onChanged();
    return this;
  }
  // optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;
  // Either the plain field or the lazily-created SingleFieldBuilder is
  // authoritative; once reencryptionProtoBuilder_ exists, reencryptionProto_
  // is nulled and all access goes through the builder.
  private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto reencryptionProto_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.getDefaultInstance();
  private com.google.protobuf.SingleFieldBuilder<
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProtoOrBuilder> reencryptionProtoBuilder_;
  /**
   * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;
   */
  public boolean hasReencryptionProto() {
    return ((bitField0_ & 0x00000008) == 0x00000008);
  }
  /**
   * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;
   */
  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto getReencryptionProto() {
    if (reencryptionProtoBuilder_ == null) {
      return reencryptionProto_;
    } else {
      return reencryptionProtoBuilder_.getMessage();
    }
  }
  /**
   * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;
   */
  public Builder setReencryptionProto(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto value) {
    if (reencryptionProtoBuilder_ == null) {
      if (value == null) {
        throw new NullPointerException();
      }
      reencryptionProto_ = value;
      onChanged();
    } else {
      reencryptionProtoBuilder_.setMessage(value);
    }
    bitField0_ |= 0x00000008;
    return this;
  }
  /**
   * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;
   */
  public Builder setReencryptionProto(
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder builderForValue) {
    if (reencryptionProtoBuilder_ == null) {
      reencryptionProto_ = builderForValue.build();
      onChanged();
    } else {
      reencryptionProtoBuilder_.setMessage(builderForValue.build());
    }
    bitField0_ |= 0x00000008;
    return this;
  }
  /**
   * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;
   */
  public Builder mergeReencryptionProto(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto value) {
    if (reencryptionProtoBuilder_ == null) {
      // Merge only when a non-default value is already present; otherwise
      // just adopt `value` (avoids an unnecessary copy).
      if (((bitField0_ & 0x00000008) == 0x00000008) &&
          reencryptionProto_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.getDefaultInstance()) {
        reencryptionProto_ =
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.newBuilder(reencryptionProto_).mergeFrom(value).buildPartial();
      } else {
        reencryptionProto_ = value;
      }
      onChanged();
    } else {
      reencryptionProtoBuilder_.mergeFrom(value);
    }
    bitField0_ |= 0x00000008;
    return this;
  }
  /**
   * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;
   */
  public Builder clearReencryptionProto() {
    if (reencryptionProtoBuilder_ == null) {
      reencryptionProto_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.getDefaultInstance();
      onChanged();
    } else {
      reencryptionProtoBuilder_.clear();
    }
    bitField0_ = (bitField0_ & ~0x00000008);
    return this;
  }
  /**
   * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;
   */
  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder getReencryptionProtoBuilder() {
    bitField0_ |= 0x00000008;
    onChanged();
    return getReencryptionProtoFieldBuilder().getBuilder();
  }
  /**
   * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;
   */
  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProtoOrBuilder getReencryptionProtoOrBuilder() {
    if (reencryptionProtoBuilder_ != null) {
      return reencryptionProtoBuilder_.getMessageOrBuilder();
    } else {
      return reencryptionProto_;
    }
  }
  /**
   * optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;
   */
  private com.google.protobuf.SingleFieldBuilder<
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProtoOrBuilder>
      getReencryptionProtoFieldBuilder() {
    if (reencryptionProtoBuilder_ == null) {
      reencryptionProtoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProtoOrBuilder>(
              reencryptionProto_,
              getParentForChildren(),
              isClean());
      reencryptionProto_ = null;
    }
    return reencryptionProtoBuilder_;
  }
  // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ZoneEncryptionInfoProto)
}
// Eagerly creates the shared default instance (noInit=true skips parsing)
// and seeds it with proto defaults.
static {
defaultInstance = new ZoneEncryptionInfoProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ZoneEncryptionInfoProto)
}
// Read-only accessor interface implemented by both ReencryptionInfoProto and
// its Builder; one has/get pair per field (plus Bytes variants for strings).
public interface ReencryptionInfoProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required string ezKeyVersionName = 1;
/**
 * required string ezKeyVersionName = 1;
 */
boolean hasEzKeyVersionName();
/**
 * required string ezKeyVersionName = 1;
 */
java.lang.String getEzKeyVersionName();
/**
 * required string ezKeyVersionName = 1;
 */
com.google.protobuf.ByteString
getEzKeyVersionNameBytes();
// required uint64 submissionTime = 2;
/**
 * required uint64 submissionTime = 2;
 */
boolean hasSubmissionTime();
/**
 * required uint64 submissionTime = 2;
 */
long getSubmissionTime();
// required bool canceled = 3;
/**
 * required bool canceled = 3;
 */
boolean hasCanceled();
/**
 * required bool canceled = 3;
 */
boolean getCanceled();
// required int64 numReencrypted = 4;
/**
 * required int64 numReencrypted = 4;
 */
boolean hasNumReencrypted();
/**
 * required int64 numReencrypted = 4;
 */
long getNumReencrypted();
// required int64 numFailures = 5;
/**
 * required int64 numFailures = 5;
 */
boolean hasNumFailures();
/**
 * required int64 numFailures = 5;
 */
long getNumFailures();
// optional uint64 completionTime = 6;
/**
 * optional uint64 completionTime = 6;
 */
boolean hasCompletionTime();
/**
 * optional uint64 completionTime = 6;
 */
long getCompletionTime();
// optional string lastFile = 7;
/**
 * optional string lastFile = 7;
 */
boolean hasLastFile();
/**
 * optional string lastFile = 7;
 */
java.lang.String getLastFile();
/**
 * optional string lastFile = 7;
 */
com.google.protobuf.ByteString
getLastFileBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.ReencryptionInfoProto}
*
*
**
* Re-encryption information for an encryption zone
*
*/
public static final class ReencryptionInfoProto extends
com.google.protobuf.GeneratedMessage
implements ReencryptionInfoProtoOrBuilder {
// Use ReencryptionInfoProto.newBuilder() to construct.
// NOTE(review): the wildcard type argument on GeneratedMessage.Builder was
// stripped by the HTML export this file was recovered from; restored so the
// constructor compiles as protoc 2.5 originally emitted it.
private ReencryptionInfoProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
  super(builder);
  // Adopt whatever unknown fields the builder accumulated.
  this.unknownFields = builder.getUnknownFields();
}
// noInit constructor: used only for the shared default instance; skips
// parsing and installs an empty unknown-field set.
private ReencryptionInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ReencryptionInfoProto defaultInstance;
public static ReencryptionInfoProto getDefaultInstance() {
return defaultInstance;
}
public ReencryptionInfoProto getDefaultInstanceForType() {
return defaultInstance;
}
// Fields not recognized at parse time, preserved for reserialization.
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
// Wire-format parsing constructor: reads tag/value pairs until EOF (tag 0),
// setting the matching presence bit per field. Unrecognized tags go to the
// unknown-field set (the `default` arm, which falls before the field cases
// in source but is matched last by switch semantics).
private ReencryptionInfoProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
// tag 10 = field 1, wire type 2 (length-delimited)
case 10: {
bitField0_ |= 0x00000001;
ezKeyVersionName_ = input.readBytes();
break;
}
// tag 16 = field 2, wire type 0 (varint)
case 16: {
bitField0_ |= 0x00000002;
submissionTime_ = input.readUInt64();
break;
}
case 24: {
bitField0_ |= 0x00000004;
canceled_ = input.readBool();
break;
}
case 32: {
bitField0_ |= 0x00000008;
numReencrypted_ = input.readInt64();
break;
}
case 40: {
bitField0_ |= 0x00000010;
numFailures_ = input.readInt64();
break;
}
case 48: {
bitField0_ |= 0x00000020;
completionTime_ = input.readUInt64();
break;
}
case 58: {
bitField0_ |= 0x00000040;
lastFile_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
// Always freeze unknown fields/extensions, even on error, so the
// partially-parsed message attached to the exception is consistent.
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
// Descriptor accessors for hadoop.hdfs.ReencryptionInfoProto; both delegate
// to the file-level descriptor tables of the enclosing HdfsProtos class.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ReencryptionInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ReencryptionInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder.class);
}
// NOTE(review): the generic type arguments on Parser/AbstractParser were
// stripped by the HTML export this file was recovered from; restored here so
// the declaration compiles exactly as protoc 2.5 emits it.
/**
 * Shared parser singleton backing every {@code parseFrom(...)} entry point
 * of ReencryptionInfoProto.
 */
public static com.google.protobuf.Parser<ReencryptionInfoProto> PARSER =
    new com.google.protobuf.AbstractParser<ReencryptionInfoProto>() {
  public ReencryptionInfoProto parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    // Delegates to the stream-parsing constructor of the message.
    return new ReencryptionInfoProto(input, extensionRegistry);
  }
};
@java.lang.Override
public com.google.protobuf.Parser<ReencryptionInfoProto> getParserForType() {
  return PARSER;
}
// Presence bitmask: bit 0 = ezKeyVersionName, bit 1 = submissionTime,
// bit 2 = canceled, bit 3 = numReencrypted, bit 4 = numFailures,
// bit 5 = completionTime, bit 6 = lastFile.
private int bitField0_;
// required string ezKeyVersionName = 1;
public static final int EZKEYVERSIONNAME_FIELD_NUMBER = 1;
// Holds either a String or a ByteString; lazily converted in the accessors.
private java.lang.Object ezKeyVersionName_;
/**
 * required string ezKeyVersionName = 1;
 */
public boolean hasEzKeyVersionName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
 * required string ezKeyVersionName = 1;
 */
public java.lang.String getEzKeyVersionName() {
java.lang.Object ref = ezKeyVersionName_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
// Cache the decoded String only when the bytes are valid UTF-8.
if (bs.isValidUtf8()) {
ezKeyVersionName_ = s;
}
return s;
}
}
/**
 * required string ezKeyVersionName = 1;
 */
public com.google.protobuf.ByteString
getEzKeyVersionNameBytes() {
java.lang.Object ref = ezKeyVersionName_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
ezKeyVersionName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required uint64 submissionTime = 2;
public static final int SUBMISSIONTIME_FIELD_NUMBER = 2;
private long submissionTime_;
/**
 * required uint64 submissionTime = 2;
 */
public boolean hasSubmissionTime() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
 * required uint64 submissionTime = 2;
 */
public long getSubmissionTime() {
return submissionTime_;
}
// required bool canceled = 3;
public static final int CANCELED_FIELD_NUMBER = 3;
private boolean canceled_;
/**
 * required bool canceled = 3;
 */
public boolean hasCanceled() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
 * required bool canceled = 3;
 */
public boolean getCanceled() {
return canceled_;
}
// required int64 numReencrypted = 4;
public static final int NUMREENCRYPTED_FIELD_NUMBER = 4;
private long numReencrypted_;
/**
 * required int64 numReencrypted = 4;
 */
public boolean hasNumReencrypted() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
 * required int64 numReencrypted = 4;
 */
public long getNumReencrypted() {
return numReencrypted_;
}
// required int64 numFailures = 5;
public static final int NUMFAILURES_FIELD_NUMBER = 5;
private long numFailures_;
/**
 * required int64 numFailures = 5;
 */
public boolean hasNumFailures() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
 * required int64 numFailures = 5;
 */
public long getNumFailures() {
return numFailures_;
}
// optional uint64 completionTime = 6;
public static final int COMPLETIONTIME_FIELD_NUMBER = 6;
private long completionTime_;
/**
 * optional uint64 completionTime = 6;
 */
public boolean hasCompletionTime() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
 * optional uint64 completionTime = 6;
 */
public long getCompletionTime() {
return completionTime_;
}
// optional string lastFile = 7;
public static final int LASTFILE_FIELD_NUMBER = 7;
// Holds either a String or a ByteString; lazily converted in the accessors.
private java.lang.Object lastFile_;
/**
 * optional string lastFile = 7;
 */
public boolean hasLastFile() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional string lastFile = 7;
*/
public java.lang.String getLastFile() {
java.lang.Object ref = lastFile_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
lastFile_ = s;
}
return s;
}
}
/**
* optional string lastFile = 7;
*/
public com.google.protobuf.ByteString
getLastFileBytes() {
java.lang.Object ref = lastFile_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
lastFile_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// Resets every field to its proto default; called by the parsing constructor
// and the static default-instance initializer. (protoc-generated; do not edit.)
private void initFields() {
ezKeyVersionName_ = "";
submissionTime_ = 0L;
canceled_ = false;
numReencrypted_ = 0L;
numFailures_ = 0L;
completionTime_ = 0L;
lastFile_ = "";
}
// Memoized tri-state: -1 = not yet computed, 0 = missing required fields, 1 = ok.
private byte memoizedIsInitialized = -1;
// True iff all five `required` fields (1-5) are present; fields 6/7 are optional.
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasEzKeyVersionName()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasSubmissionTime()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasCanceled()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasNumReencrypted()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasNumFailures()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
// Serializes set fields in field-number order, then any unknown fields.
// The getSerializedSize() call primes the memoized size before writing.
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getEzKeyVersionNameBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, submissionTime_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBool(3, canceled_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeInt64(4, numReencrypted_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeInt64(5, numFailures_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeUInt64(6, completionTime_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeBytes(7, getLastFileBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
// Wire size in bytes of the set fields plus unknown fields; memoized because
// messages are immutable once constructed.
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getEzKeyVersionNameBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, submissionTime_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(3, canceled_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(4, numReencrypted_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(5, numFailures_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(6, completionTime_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(7, getLastFileBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
// Value equality: presence bits and values of all seven fields plus unknown
// fields must match. (protoc-generated; do not hand-edit.)
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto) obj;
boolean result = true;
result = result && (hasEzKeyVersionName() == other.hasEzKeyVersionName());
if (hasEzKeyVersionName()) {
result = result && getEzKeyVersionName()
.equals(other.getEzKeyVersionName());
}
result = result && (hasSubmissionTime() == other.hasSubmissionTime());
if (hasSubmissionTime()) {
result = result && (getSubmissionTime()
== other.getSubmissionTime());
}
result = result && (hasCanceled() == other.hasCanceled());
if (hasCanceled()) {
result = result && (getCanceled()
== other.getCanceled());
}
result = result && (hasNumReencrypted() == other.hasNumReencrypted());
if (hasNumReencrypted()) {
result = result && (getNumReencrypted()
== other.getNumReencrypted());
}
result = result && (hasNumFailures() == other.hasNumFailures());
if (hasNumFailures()) {
result = result && (getNumFailures()
== other.getNumFailures());
}
result = result && (hasCompletionTime() == other.hasCompletionTime());
if (hasCompletionTime()) {
result = result && (getCompletionTime()
== other.getCompletionTime());
}
result = result && (hasLastFile() == other.hasLastFile());
if (hasLastFile()) {
result = result && getLastFile()
.equals(other.getLastFile());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
// Hash over descriptor plus each present field (field number mixed in so that
// equal values in different fields hash differently); memoized since the
// message is immutable. Consistent with equals() above.
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasEzKeyVersionName()) {
hash = (37 * hash) + EZKEYVERSIONNAME_FIELD_NUMBER;
hash = (53 * hash) + getEzKeyVersionName().hashCode();
}
if (hasSubmissionTime()) {
hash = (37 * hash) + SUBMISSIONTIME_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getSubmissionTime());
}
if (hasCanceled()) {
hash = (37 * hash) + CANCELED_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getCanceled());
}
if (hasNumReencrypted()) {
hash = (37 * hash) + NUMREENCRYPTED_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getNumReencrypted());
}
if (hasNumFailures()) {
hash = (37 * hash) + NUMFAILURES_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getNumFailures());
}
if (hasCompletionTime()) {
hash = (37 * hash) + COMPLETIONTIME_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getCompletionTime());
}
if (hasLastFile()) {
hash = (37 * hash) + LASTFILE_FIELD_NUMBER;
hash = (53 * hash) + getLastFile().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// Standard protoc-generated parse entry points: every overload delegates to
// PARSER, varying only the input source (ByteString / byte[] / stream /
// CodedInputStream) and optional extension registry.
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Delimited variants read a leading varint length prefix before the message.
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Builder factories: newBuilder(prototype) starts from a copy of an existing
// message; toBuilder() is the instance-side equivalent.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ReencryptionInfoProto}
*
*
**
* Re-encryption information for an encryption zone
*
*/
// Mutable builder for ReencryptionInfoProto. protoc-generated ("DO NOT EDIT"):
// the bitField0_ masks here mirror the message class exactly and the statement
// order in buildPartial()/mergeFrom() is wire-compat sensitive, so only
// comments are added. Regenerate from hdfs.proto for real changes.
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ReencryptionInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ReencryptionInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
// Resets all fields to proto defaults and clears their presence bits.
public Builder clear() {
super.clear();
ezKeyVersionName_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
submissionTime_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
canceled_ = false;
bitField0_ = (bitField0_ & ~0x00000004);
numReencrypted_ = 0L;
bitField0_ = (bitField0_ & ~0x00000008);
numFailures_ = 0L;
bitField0_ = (bitField0_ & ~0x00000010);
completionTime_ = 0L;
bitField0_ = (bitField0_ & ~0x00000020);
lastFile_ = "";
bitField0_ = (bitField0_ & ~0x00000040);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ReencryptionInfoProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.getDefaultInstance();
}
// build() enforces that all required fields are set; buildPartial() does not.
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
// Copies builder state into a new immutable message; note values are copied
// unconditionally, only the presence bits are gated on from_bitField0_.
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.ezKeyVersionName_ = ezKeyVersionName_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.submissionTime_ = submissionTime_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.canceled_ = canceled_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.numReencrypted_ = numReencrypted_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.numFailures_ = numFailures_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000020;
}
result.completionTime_ = completionTime_;
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000040;
}
result.lastFile_ = lastFile_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
// Field-wise merge: fields set in `other` overwrite this builder's values.
// String fields copy the raw Object (String or ByteString) to preserve lazy
// UTF-8 decoding.
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.getDefaultInstance()) return this;
if (other.hasEzKeyVersionName()) {
bitField0_ |= 0x00000001;
ezKeyVersionName_ = other.ezKeyVersionName_;
onChanged();
}
if (other.hasSubmissionTime()) {
setSubmissionTime(other.getSubmissionTime());
}
if (other.hasCanceled()) {
setCanceled(other.getCanceled());
}
if (other.hasNumReencrypted()) {
setNumReencrypted(other.getNumReencrypted());
}
if (other.hasNumFailures()) {
setNumFailures(other.getNumFailures());
}
if (other.hasCompletionTime()) {
setCompletionTime(other.getCompletionTime());
}
if (other.hasLastFile()) {
bitField0_ |= 0x00000040;
lastFile_ = other.lastFile_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
// Unlike the message class, the builder recomputes this on every call (no
// memoization) because builder state is mutable.
public final boolean isInitialized() {
if (!hasEzKeyVersionName()) {
return false;
}
if (!hasSubmissionTime()) {
return false;
}
if (!hasCanceled()) {
return false;
}
if (!hasNumReencrypted()) {
return false;
}
if (!hasNumFailures()) {
return false;
}
return true;
}
// Parses from a stream and merges the result in; on a parse error the
// partially-read message is still merged (finally block) before rethrowing.
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string ezKeyVersionName = 1;
private java.lang.Object ezKeyVersionName_ = "";
/**
* required string ezKeyVersionName = 1;
*/
public boolean hasEzKeyVersionName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string ezKeyVersionName = 1;
*/
public java.lang.String getEzKeyVersionName() {
java.lang.Object ref = ezKeyVersionName_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
ezKeyVersionName_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string ezKeyVersionName = 1;
*/
public com.google.protobuf.ByteString
getEzKeyVersionNameBytes() {
java.lang.Object ref = ezKeyVersionName_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
ezKeyVersionName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string ezKeyVersionName = 1;
*/
public Builder setEzKeyVersionName(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
ezKeyVersionName_ = value;
onChanged();
return this;
}
/**
* required string ezKeyVersionName = 1;
*/
public Builder clearEzKeyVersionName() {
bitField0_ = (bitField0_ & ~0x00000001);
ezKeyVersionName_ = getDefaultInstance().getEzKeyVersionName();
onChanged();
return this;
}
/**
* required string ezKeyVersionName = 1;
*/
public Builder setEzKeyVersionNameBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
ezKeyVersionName_ = value;
onChanged();
return this;
}
// required uint64 submissionTime = 2;
private long submissionTime_ ;
/**
* required uint64 submissionTime = 2;
*/
public boolean hasSubmissionTime() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 submissionTime = 2;
*/
public long getSubmissionTime() {
return submissionTime_;
}
/**
* required uint64 submissionTime = 2;
*/
public Builder setSubmissionTime(long value) {
bitField0_ |= 0x00000002;
submissionTime_ = value;
onChanged();
return this;
}
/**
* required uint64 submissionTime = 2;
*/
public Builder clearSubmissionTime() {
bitField0_ = (bitField0_ & ~0x00000002);
submissionTime_ = 0L;
onChanged();
return this;
}
// required bool canceled = 3;
private boolean canceled_ ;
/**
* required bool canceled = 3;
*/
public boolean hasCanceled() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required bool canceled = 3;
*/
public boolean getCanceled() {
return canceled_;
}
/**
* required bool canceled = 3;
*/
public Builder setCanceled(boolean value) {
bitField0_ |= 0x00000004;
canceled_ = value;
onChanged();
return this;
}
/**
* required bool canceled = 3;
*/
public Builder clearCanceled() {
bitField0_ = (bitField0_ & ~0x00000004);
canceled_ = false;
onChanged();
return this;
}
// required int64 numReencrypted = 4;
private long numReencrypted_ ;
/**
* required int64 numReencrypted = 4;
*/
public boolean hasNumReencrypted() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required int64 numReencrypted = 4;
*/
public long getNumReencrypted() {
return numReencrypted_;
}
/**
* required int64 numReencrypted = 4;
*/
public Builder setNumReencrypted(long value) {
bitField0_ |= 0x00000008;
numReencrypted_ = value;
onChanged();
return this;
}
/**
* required int64 numReencrypted = 4;
*/
public Builder clearNumReencrypted() {
bitField0_ = (bitField0_ & ~0x00000008);
numReencrypted_ = 0L;
onChanged();
return this;
}
// required int64 numFailures = 5;
private long numFailures_ ;
/**
* required int64 numFailures = 5;
*/
public boolean hasNumFailures() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required int64 numFailures = 5;
*/
public long getNumFailures() {
return numFailures_;
}
/**
* required int64 numFailures = 5;
*/
public Builder setNumFailures(long value) {
bitField0_ |= 0x00000010;
numFailures_ = value;
onChanged();
return this;
}
/**
* required int64 numFailures = 5;
*/
public Builder clearNumFailures() {
bitField0_ = (bitField0_ & ~0x00000010);
numFailures_ = 0L;
onChanged();
return this;
}
// optional uint64 completionTime = 6;
private long completionTime_ ;
/**
* optional uint64 completionTime = 6;
*/
public boolean hasCompletionTime() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional uint64 completionTime = 6;
*/
public long getCompletionTime() {
return completionTime_;
}
/**
* optional uint64 completionTime = 6;
*/
public Builder setCompletionTime(long value) {
bitField0_ |= 0x00000020;
completionTime_ = value;
onChanged();
return this;
}
/**
* optional uint64 completionTime = 6;
*/
public Builder clearCompletionTime() {
bitField0_ = (bitField0_ & ~0x00000020);
completionTime_ = 0L;
onChanged();
return this;
}
// optional string lastFile = 7;
private java.lang.Object lastFile_ = "";
/**
* optional string lastFile = 7;
*/
public boolean hasLastFile() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional string lastFile = 7;
*/
public java.lang.String getLastFile() {
java.lang.Object ref = lastFile_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
lastFile_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string lastFile = 7;
*/
public com.google.protobuf.ByteString
getLastFileBytes() {
java.lang.Object ref = lastFile_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
lastFile_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* optional string lastFile = 7;
*/
public Builder setLastFile(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000040;
lastFile_ = value;
onChanged();
return this;
}
/**
* optional string lastFile = 7;
*/
public Builder clearLastFile() {
bitField0_ = (bitField0_ & ~0x00000040);
lastFile_ = getDefaultInstance().getLastFile();
onChanged();
return this;
}
/**
* optional string lastFile = 7;
*/
public Builder setLastFileBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000040;
lastFile_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ReencryptionInfoProto)
}
// Eagerly builds the shared default instance used by getDefaultInstance().
static {
defaultInstance = new ReencryptionInfoProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ReencryptionInfoProto)
}
// Read-only accessor interface implemented by both CipherOptionProto and its
// Builder. protoc-generated; regenerate from hdfs.proto instead of editing.
public interface CipherOptionProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.CipherSuiteProto suite = 1;
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
boolean hasSuite();
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite();
// optional bytes inKey = 2;
/**
* optional bytes inKey = 2;
*/
boolean hasInKey();
/**
* optional bytes inKey = 2;
*/
com.google.protobuf.ByteString getInKey();
// optional bytes inIv = 3;
/**
* optional bytes inIv = 3;
*/
boolean hasInIv();
/**
* optional bytes inIv = 3;
*/
com.google.protobuf.ByteString getInIv();
// optional bytes outKey = 4;
/**
* optional bytes outKey = 4;
*/
boolean hasOutKey();
/**
* optional bytes outKey = 4;
*/
com.google.protobuf.ByteString getOutKey();
// optional bytes outIv = 5;
/**
* optional bytes outIv = 5;
*/
boolean hasOutIv();
/**
* optional bytes outIv = 5;
*/
com.google.protobuf.ByteString getOutIv();
}
/**
* Protobuf type {@code hadoop.hdfs.CipherOptionProto}
*
*
**
* Cipher option
*
*/
public static final class CipherOptionProto extends
com.google.protobuf.GeneratedMessage
implements CipherOptionProtoOrBuilder {
// Use CipherOptionProto.newBuilder() to construct.
// NOTE(review): the scraped source read `GeneratedMessage.Builder> builder`,
// which is not valid Java — the wildcard type argument was stripped when the
// generics were HTML-escaped. Restored to `Builder<?>`, matching protoc's
// canonical output for this constructor. No behavioral change.
private CipherOptionProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
// noInit path: used only for the static default instance; skips field init.
private CipherOptionProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final CipherOptionProto defaultInstance;
public static CipherOptionProto getDefaultInstance() {
return defaultInstance;
}
public CipherOptionProto getDefaultInstanceForType() {
return defaultInstance;
}
// Unknown fields captured during parsing are preserved for re-serialization.
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
// Parsing constructor: reads tag/value pairs from the stream until EOF (tag 0)
// or an unparseable field ends the loop. Unrecognized tags and unknown enum
// values are preserved in unknownFields. protoc-generated; the case ordering
// (default before case 8) is protoc's emission order and has no effect on
// Java switch dispatch.
private CipherOptionProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
// Field 1 (enum suite): an unrecognized enum number is kept as an
// unknown varint rather than dropped.
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
suite_ = value;
}
break;
}
case 18: {
bitField0_ |= 0x00000002;
inKey_ = input.readBytes();
break;
}
case 26: {
bitField0_ |= 0x00000004;
inIv_ = input.readBytes();
break;
}
case 34: {
bitField0_ |= 0x00000008;
outKey_ = input.readBytes();
break;
}
case 42: {
bitField0_ |= 0x00000010;
outIv_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
// Always freeze whatever was read, even on error, so the unfinished
// message attached to the exception is usable.
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
// NOTE(review): protoc-generated; only change is restoring the
// <CipherOptionProto> type arguments on Parser/AbstractParser that the scrape
// stripped (raw types). Regenerate from hdfs.proto for real changes.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder.class);
}
// Stateless parser: delegates to the (CodedInputStream, registry) constructor.
public static com.google.protobuf.Parser<CipherOptionProto> PARSER =
new com.google.protobuf.AbstractParser<CipherOptionProto>() {
public CipherOptionProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new CipherOptionProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<CipherOptionProto> getParserForType() {
return PARSER;
}
// Field storage for CipherOptionProto (protoc-generated; do not hand-edit).
// One presence bit per field in bitField0_, in declaration order.
private int bitField0_;
// required .hadoop.hdfs.CipherSuiteProto suite = 1;
public static final int SUITE_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto suite_;
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public boolean hasSuite() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() {
return suite_;
}
// optional bytes inKey = 2;
public static final int INKEY_FIELD_NUMBER = 2;
private com.google.protobuf.ByteString inKey_;
/**
* optional bytes inKey = 2;
*/
public boolean hasInKey() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional bytes inKey = 2;
*/
public com.google.protobuf.ByteString getInKey() {
return inKey_;
}
// optional bytes inIv = 3;
public static final int INIV_FIELD_NUMBER = 3;
private com.google.protobuf.ByteString inIv_;
/**
* optional bytes inIv = 3;
*/
public boolean hasInIv() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional bytes inIv = 3;
*/
public com.google.protobuf.ByteString getInIv() {
return inIv_;
}
// optional bytes outKey = 4;
public static final int OUTKEY_FIELD_NUMBER = 4;
private com.google.protobuf.ByteString outKey_;
/**
* optional bytes outKey = 4;
*/
public boolean hasOutKey() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional bytes outKey = 4;
*/
public com.google.protobuf.ByteString getOutKey() {
return outKey_;
}
// optional bytes outIv = 5;
public static final int OUTIV_FIELD_NUMBER = 5;
private com.google.protobuf.ByteString outIv_;
/**
* optional bytes outIv = 5;
*/
public boolean hasOutIv() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional bytes outIv = 5;
*/
public com.google.protobuf.ByteString getOutIv() {
return outIv_;
}
// Resets every field to its proto default; the enum defaults to UNKNOWN and
// all bytes fields default to the empty ByteString.
private void initFields() {
suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN;
inKey_ = com.google.protobuf.ByteString.EMPTY;
inIv_ = com.google.protobuf.ByteString.EMPTY;
outKey_ = com.google.protobuf.ByteString.EMPTY;
outIv_ = com.google.protobuf.ByteString.EMPTY;
}
// Memoized tri-state: -1 = not yet computed, 0 = missing required field, 1 = ok.
private byte memoizedIsInitialized = -1;
// Only `suite` (field 1) is required; the four bytes fields are optional.
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasSuite()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
// Serializes set fields in field-number order, then any unknown fields.
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, suite_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, inKey_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, inIv_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeBytes(4, outKey_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeBytes(5, outIv_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
// Wire size of the set fields plus unknown fields; memoized (immutable message).
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, suite_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, inKey_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, inIv_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(4, outKey_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(5, outIv_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
// Java-serialization hook; delegates to GeneratedMessage, which replaces
// the object with a serialized-bytes proxy.
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
// Field-by-field equality: presence bits must match, and each present
// field's value must match; unknown fields are compared as well.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto) obj;
boolean result = true;
result = result && (hasSuite() == other.hasSuite());
if (hasSuite()) {
result = result &&
(getSuite() == other.getSuite());
}
result = result && (hasInKey() == other.hasInKey());
if (hasInKey()) {
result = result && getInKey()
.equals(other.getInKey());
}
result = result && (hasInIv() == other.hasInIv());
if (hasInIv()) {
result = result && getInIv()
.equals(other.getInIv());
}
result = result && (hasOutKey() == other.hasOutKey());
if (hasOutKey()) {
result = result && getOutKey()
.equals(other.getOutKey());
}
result = result && (hasOutIv() == other.hasOutIv());
if (hasOutIv()) {
result = result && getOutIv()
.equals(other.getOutIv());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
// Cached hash code; 0 means not yet computed.
private int memoizedHashCode = 0;
// Hash consistent with equals(): mixes descriptor, each present field
// (tagged by its field number) and the unknown-field set. Memoized.
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasSuite()) {
hash = (37 * hash) + SUITE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getSuite());
}
if (hasInKey()) {
hash = (37 * hash) + INKEY_FIELD_NUMBER;
hash = (53 * hash) + getInKey().hashCode();
}
if (hasInIv()) {
hash = (37 * hash) + INIV_FIELD_NUMBER;
hash = (53 * hash) + getInIv().hashCode();
}
if (hasOutKey()) {
hash = (37 * hash) + OUTKEY_FIELD_NUMBER;
hash = (53 * hash) + getOutKey().hashCode();
}
if (hasOutIv()) {
hash = (37 * hash) + OUTIV_FIELD_NUMBER;
hash = (53 * hash) + getOutIv().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// Static parse entry points; all delegate to PARSER. The variants cover
// ByteString / byte[] / InputStream / CodedInputStream sources, with and
// without an ExtensionRegistry, plus length-delimited stream framing.
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Builder factory methods: fresh builder, builder pre-populated from a
// prototype message, and the internal parent-aware builder used by
// GeneratedMessage for nested-builder change propagation.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.CipherOptionProto}
*
*
**
* Cipher option
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN;
bitField0_ = (bitField0_ & ~0x00000001);
inKey_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000002);
inIv_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000004);
outKey_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000008);
outIv_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.suite_ = suite_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.inKey_ = inKey_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.inIv_ = inIv_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.outKey_ = outKey_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.outIv_ = outIv_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.getDefaultInstance()) return this;
if (other.hasSuite()) {
setSuite(other.getSuite());
}
if (other.hasInKey()) {
setInKey(other.getInKey());
}
if (other.hasInIv()) {
setInIv(other.getInIv());
}
if (other.hasOutKey()) {
setOutKey(other.getOutKey());
}
if (other.hasOutIv()) {
setOutIv(other.getOutIv());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasSuite()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.CipherSuiteProto suite = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN;
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public boolean hasSuite() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() {
return suite_;
}
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public Builder setSuite(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
suite_ = value;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.CipherSuiteProto suite = 1;
*/
public Builder clearSuite() {
bitField0_ = (bitField0_ & ~0x00000001);
suite_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN;
onChanged();
return this;
}
// optional bytes inKey = 2;
private com.google.protobuf.ByteString inKey_ = com.google.protobuf.ByteString.EMPTY;
/**
* optional bytes inKey = 2;
*/
public boolean hasInKey() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional bytes inKey = 2;
*/
public com.google.protobuf.ByteString getInKey() {
return inKey_;
}
/**
* optional bytes inKey = 2;
*/
public Builder setInKey(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
inKey_ = value;
onChanged();
return this;
}
/**
* optional bytes inKey = 2;
*/
public Builder clearInKey() {
bitField0_ = (bitField0_ & ~0x00000002);
inKey_ = getDefaultInstance().getInKey();
onChanged();
return this;
}
// optional bytes inIv = 3;
private com.google.protobuf.ByteString inIv_ = com.google.protobuf.ByteString.EMPTY;
/**
* optional bytes inIv = 3;
*/
public boolean hasInIv() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional bytes inIv = 3;
*/
public com.google.protobuf.ByteString getInIv() {
return inIv_;
}
/**
* optional bytes inIv = 3;
*/
public Builder setInIv(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
inIv_ = value;
onChanged();
return this;
}
/**
* optional bytes inIv = 3;
*/
public Builder clearInIv() {
bitField0_ = (bitField0_ & ~0x00000004);
inIv_ = getDefaultInstance().getInIv();
onChanged();
return this;
}
// optional bytes outKey = 4;
private com.google.protobuf.ByteString outKey_ = com.google.protobuf.ByteString.EMPTY;
/**
* optional bytes outKey = 4;
*/
public boolean hasOutKey() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional bytes outKey = 4;
*/
public com.google.protobuf.ByteString getOutKey() {
return outKey_;
}
/**
* optional bytes outKey = 4;
*/
public Builder setOutKey(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
outKey_ = value;
onChanged();
return this;
}
/**
* optional bytes outKey = 4;
*/
public Builder clearOutKey() {
bitField0_ = (bitField0_ & ~0x00000008);
outKey_ = getDefaultInstance().getOutKey();
onChanged();
return this;
}
// optional bytes outIv = 5;
private com.google.protobuf.ByteString outIv_ = com.google.protobuf.ByteString.EMPTY;
/**
* optional bytes outIv = 5;
*/
public boolean hasOutIv() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional bytes outIv = 5;
*/
public com.google.protobuf.ByteString getOutIv() {
return outIv_;
}
/**
* optional bytes outIv = 5;
*/
public Builder setOutIv(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000010;
outIv_ = value;
onChanged();
return this;
}
/**
* optional bytes outIv = 5;
*/
public Builder clearOutIv() {
bitField0_ = (bitField0_ & ~0x00000010);
outIv_ = getDefaultInstance().getOutIv();
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.CipherOptionProto)
}
// Eagerly create the shared default instance (noInit constructor) and then
// populate its fields with proto defaults.
static {
defaultInstance = new CipherOptionProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.CipherOptionProto)
}
public interface LocatedBlocksProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required uint64 fileLength = 1;
/**
* required uint64 fileLength = 1;
*/
boolean hasFileLength();
/**
* required uint64 fileLength = 1;
*/
long getFileLength();
// repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
java.util.List
getBlocksList();
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index);
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
int getBlocksCount();
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
java.util.List extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>
getBlocksOrBuilderList();
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder(
int index);
// required bool underConstruction = 3;
/**
* required bool underConstruction = 3;
*/
boolean hasUnderConstruction();
/**
* required bool underConstruction = 3;
*/
boolean getUnderConstruction();
// optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
boolean hasLastBlock();
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getLastBlock();
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getLastBlockOrBuilder();
// required bool isLastBlockComplete = 5;
/**
* required bool isLastBlockComplete = 5;
*/
boolean hasIsLastBlockComplete();
/**
* required bool isLastBlockComplete = 5;
*/
boolean getIsLastBlockComplete();
// optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
boolean hasFileEncryptionInfo();
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo();
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder();
// optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;
*
*
* Optional field for erasure coding
*
*/
boolean hasEcPolicy();
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;
*
*
* Optional field for erasure coding
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy();
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;
*
*
* Optional field for erasure coding
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.LocatedBlocksProto}
*
*
**
* A set of file blocks and their locations.
*
*/
public static final class LocatedBlocksProto extends
com.google.protobuf.GeneratedMessage
implements LocatedBlocksProtoOrBuilder {
// Use LocatedBlocksProto.newBuilder() to construct.
// NOTE(review): restored the wildcard type argument on the builder
// parameter; "GeneratedMessage.Builder>" was broken syntax left over from
// the rendered generated source.
private LocatedBlocksProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
// Constructor for the shared default instance only; skips field initialization.
private LocatedBlocksProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
// Shared immutable default instance, created in the static initializer.
private static final LocatedBlocksProto defaultInstance;
public static LocatedBlocksProto getDefaultInstance() {
return defaultInstance;
}
public LocatedBlocksProto getDefaultInstanceForType() {
return defaultInstance;
}
// Fields that arrived on the wire but are not defined in this schema version.
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
// Wire-format parsing constructor: reads tags until EOF (tag 0) or an
// unparseable unknown field, filling fields and presence bits as it goes.
// NOTE(review): restored the element type on the 'blocks_' ArrayList
// allocation; it was stripped to a raw type when this generated source was
// rendered. No behavioral change.
private LocatedBlocksProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
fileLength_ = input.readUInt64();
break;
}
case 18: {
// Repeated field: lazily allocate the mutable list on first element;
// mutable_bitField0_ tracks that allocation, not message presence.
if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto>();
mutable_bitField0_ |= 0x00000002;
}
blocks_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.PARSER, extensionRegistry));
break;
}
case 24: {
bitField0_ |= 0x00000002;
underConstruction_ = input.readBool();
break;
}
case 34: {
// If lastBlock was already set, merge the new occurrence into it
// (last-message-wins-with-merge semantics for singular messages).
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) == 0x00000004)) {
subBuilder = lastBlock_.toBuilder();
}
lastBlock_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(lastBlock_);
lastBlock_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
case 40: {
bitField0_ |= 0x00000008;
isLastBlockComplete_ = input.readBool();
break;
}
case 50: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000010) == 0x00000010)) {
subBuilder = fileEncryptionInfo_.toBuilder();
}
fileEncryptionInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(fileEncryptionInfo_);
fileEncryptionInfo_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000010;
break;
}
case 58: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000020) == 0x00000020)) {
subBuilder = ecPolicy_.toBuilder();
}
ecPolicy_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(ecPolicy_);
ecPolicy_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000020;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
// Freeze the repeated field and the unknown-field set even on error,
// so the unfinished message attached to the exception is immutable.
if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
blocks_ = java.util.Collections.unmodifiableList(blocks_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
// Reflection support: message descriptor and field accessor table generated
// from hdfs.proto.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder.class);
}
// Shared parser instance delegating to the wire-format parsing constructor.
// NOTE(review): restored the <LocatedBlocksProto> type arguments on
// Parser/AbstractParser; they were stripped to raw types when this
// generated source was rendered.
public static com.google.protobuf.Parser<LocatedBlocksProto> PARSER =
new com.google.protobuf.AbstractParser<LocatedBlocksProto>() {
public LocatedBlocksProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new LocatedBlocksProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<LocatedBlocksProto> getParserForType() {
return PARSER;
}
// Presence bits for optional/required scalar and message fields.
private int bitField0_;
// required uint64 fileLength = 1;
public static final int FILELENGTH_FIELD_NUMBER = 1;
private long fileLength_;
/**
* required uint64 fileLength = 1;
*/
public boolean hasFileLength() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint64 fileLength = 1;
*/
public long getFileLength() {
return fileLength_;
}
// repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
public static final int BLOCKS_FIELD_NUMBER = 2;
private java.util.List blocks_;
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public java.util.List getBlocksList() {
return blocks_;
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public java.util.List extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>
getBlocksOrBuilderList() {
return blocks_;
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public int getBlocksCount() {
return blocks_.size();
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index) {
return blocks_.get(index);
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder(
int index) {
return blocks_.get(index);
}
// required bool underConstruction = 3;
public static final int UNDERCONSTRUCTION_FIELD_NUMBER = 3;
private boolean underConstruction_;
/**
* required bool underConstruction = 3;
*/
public boolean hasUnderConstruction() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required bool underConstruction = 3;
*/
public boolean getUnderConstruction() {
return underConstruction_;
}
// optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
public static final int LASTBLOCK_FIELD_NUMBER = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto lastBlock_;
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
public boolean hasLastBlock() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getLastBlock() {
return lastBlock_;
}
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getLastBlockOrBuilder() {
return lastBlock_;
}
// required bool isLastBlockComplete = 5;
public static final int ISLASTBLOCKCOMPLETE_FIELD_NUMBER = 5;
private boolean isLastBlockComplete_;
/**
* required bool isLastBlockComplete = 5;
*/
public boolean hasIsLastBlockComplete() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required bool isLastBlockComplete = 5;
*/
public boolean getIsLastBlockComplete() {
return isLastBlockComplete_;
}
// optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
public static final int FILEENCRYPTIONINFO_FIELD_NUMBER = 6;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto fileEncryptionInfo_;
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
public boolean hasFileEncryptionInfo() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo() {
return fileEncryptionInfo_;
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder() {
return fileEncryptionInfo_;
}
// optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;
public static final int ECPOLICY_FIELD_NUMBER = 7;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto ecPolicy_;
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;
*
*
* Optional field for erasure coding
*
*/
public boolean hasEcPolicy() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;
*
*
* Optional field for erasure coding
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy() {
return ecPolicy_;
}
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;
*
*
* Optional field for erasure coding
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder() {
return ecPolicy_;
}
// Sets every field to its proto default (message fields point at their
// types' shared default instances).
private void initFields() {
fileLength_ = 0L;
blocks_ = java.util.Collections.emptyList();
underConstruction_ = false;
lastBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance();
isLastBlockComplete_ = false;
fileEncryptionInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance();
ecPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance();
}
// Cached result of isInitialized(): -1 = not computed, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
// True iff all required fields are set here AND every present/contained
// sub-message is itself initialized. Result is memoized.
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasFileLength()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasUnderConstruction()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasIsLastBlockComplete()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getBlocksCount(); i++) {
if (!getBlocks(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasLastBlock()) {
if (!getLastBlock().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasFileEncryptionInfo()) {
if (!getFileEncryptionInfo().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasEcPolicy()) {
if (!getEcPolicy().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
// Serializes set fields in field-number order, then unknown fields.
// getSerializedSize() is called first so nested message sizes are cached.
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt64(1, fileLength_);
}
for (int i = 0; i < blocks_.size(); i++) {
output.writeMessage(2, blocks_.get(i));
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBool(3, underConstruction_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeMessage(4, lastBlock_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeBool(5, isLastBlockComplete_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeMessage(6, fileEncryptionInfo_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeMessage(7, ecPolicy_);
}
getUnknownFields().writeTo(output);
}
// Cached wire size of this message; -1 means not yet computed.
private int memoizedSerializedSize = -1;
// Computes (and caches) the serialized byte size: one term per set or
// repeated field plus the unknown-field set.
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(1, fileLength_);
}
for (int i = 0; i < blocks_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, blocks_.get(i));
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(3, underConstruction_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(4, lastBlock_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(5, isLastBlockComplete_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(6, fileEncryptionInfo_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(7, ecPolicy_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
// Java-serialization hook; delegates to GeneratedMessage, which replaces
// the object with a serialized-bytes proxy.
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
// Field-by-field equality: presence bits must match, each present field's
// value must match, the repeated 'blocks' lists must be equal element-wise,
// and unknown fields are compared as well.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto) obj;
boolean result = true;
result = result && (hasFileLength() == other.hasFileLength());
if (hasFileLength()) {
result = result && (getFileLength()
== other.getFileLength());
}
result = result && getBlocksList()
.equals(other.getBlocksList());
result = result && (hasUnderConstruction() == other.hasUnderConstruction());
if (hasUnderConstruction()) {
result = result && (getUnderConstruction()
== other.getUnderConstruction());
}
result = result && (hasLastBlock() == other.hasLastBlock());
if (hasLastBlock()) {
result = result && getLastBlock()
.equals(other.getLastBlock());
}
result = result && (hasIsLastBlockComplete() == other.hasIsLastBlockComplete());
if (hasIsLastBlockComplete()) {
result = result && (getIsLastBlockComplete()
== other.getIsLastBlockComplete());
}
result = result && (hasFileEncryptionInfo() == other.hasFileEncryptionInfo());
if (hasFileEncryptionInfo()) {
result = result && getFileEncryptionInfo()
.equals(other.getFileEncryptionInfo());
}
result = result && (hasEcPolicy() == other.hasEcPolicy());
if (hasEcPolicy()) {
result = result && getEcPolicy()
.equals(other.getEcPolicy());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
// Cached hash code; 0 means "not yet computed". The unsynchronized cache is
// benign under races: the computation below is deterministic over the parsed
// message state, so concurrent writers store the identical int value.
private int memoizedHashCode = 0;
@java.lang.Override
// Hash consistent with equals(): only fields that are present (per proto2
// has-bits) contribute, each mixed in as (37 * hash) + FIELD_NUMBER followed
// by (53 * hash) + valueHash — the standard protobuf-generated scheme.
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasFileLength()) {
hash = (37 * hash) + FILELENGTH_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getFileLength());
}
if (getBlocksCount() > 0) {
hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
hash = (53 * hash) + getBlocksList().hashCode();
}
if (hasUnderConstruction()) {
hash = (37 * hash) + UNDERCONSTRUCTION_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getUnderConstruction());
}
if (hasLastBlock()) {
hash = (37 * hash) + LASTBLOCK_FIELD_NUMBER;
hash = (53 * hash) + getLastBlock().hashCode();
}
if (hasIsLastBlockComplete()) {
hash = (37 * hash) + ISLASTBLOCKCOMPLETE_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getIsLastBlockComplete());
}
if (hasFileEncryptionInfo()) {
hash = (37 * hash) + FILEENCRYPTIONINFO_FIELD_NUMBER;
hash = (53 * hash) + getFileEncryptionInfo().hashCode();
}
if (hasEcPolicy()) {
hash = (37 * hash) + ECPOLICY_FIELD_NUMBER;
hash = (53 * hash) + getEcPolicy().hashCode();
}
// Unknown fields are folded in last so they affect the hash like equals().
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// ---------------------------------------------------------------------------
// Static parsing entry points. All overloads delegate to the shared PARSER
// instance; they differ only in input source (ByteString, byte[], stream,
// CodedInputStream) and whether an ExtensionRegistry is supplied. The
// "delimited" variants read a varint length prefix before the message, for
// reading a sequence of messages from one stream.
// ---------------------------------------------------------------------------
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Builder factories. newBuilder(prototype) seeds a fresh builder with a copy
// of an existing message's fields; toBuilder() is the instance-side shorthand.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
// Framework hook: creates a builder parented to an enclosing builder so that
// nested-field edits propagate invalidation upward.
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.LocatedBlocksProto}
*
*
**
* A set of file blocks and their locations.
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getBlocksFieldBuilder();
getLastBlockFieldBuilder();
getFileEncryptionInfoFieldBuilder();
getEcPolicyFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
fileLength_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
if (blocksBuilder_ == null) {
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
} else {
blocksBuilder_.clear();
}
underConstruction_ = false;
bitField0_ = (bitField0_ & ~0x00000004);
if (lastBlockBuilder_ == null) {
lastBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance();
} else {
lastBlockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
isLastBlockComplete_ = false;
bitField0_ = (bitField0_ & ~0x00000010);
if (fileEncryptionInfoBuilder_ == null) {
fileEncryptionInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance();
} else {
fileEncryptionInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000020);
if (ecPolicyBuilder_ == null) {
ecPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance();
} else {
ecPolicyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000040);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.fileLength_ = fileLength_;
if (blocksBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002)) {
blocks_ = java.util.Collections.unmodifiableList(blocks_);
bitField0_ = (bitField0_ & ~0x00000002);
}
result.blocks_ = blocks_;
} else {
result.blocks_ = blocksBuilder_.build();
}
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000002;
}
result.underConstruction_ = underConstruction_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000004;
}
if (lastBlockBuilder_ == null) {
result.lastBlock_ = lastBlock_;
} else {
result.lastBlock_ = lastBlockBuilder_.build();
}
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000008;
}
result.isLastBlockComplete_ = isLastBlockComplete_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000010;
}
if (fileEncryptionInfoBuilder_ == null) {
result.fileEncryptionInfo_ = fileEncryptionInfo_;
} else {
result.fileEncryptionInfo_ = fileEncryptionInfoBuilder_.build();
}
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000020;
}
if (ecPolicyBuilder_ == null) {
result.ecPolicy_ = ecPolicy_;
} else {
result.ecPolicy_ = ecPolicyBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance()) return this;
if (other.hasFileLength()) {
setFileLength(other.getFileLength());
}
if (blocksBuilder_ == null) {
if (!other.blocks_.isEmpty()) {
if (blocks_.isEmpty()) {
blocks_ = other.blocks_;
bitField0_ = (bitField0_ & ~0x00000002);
} else {
ensureBlocksIsMutable();
blocks_.addAll(other.blocks_);
}
onChanged();
}
} else {
if (!other.blocks_.isEmpty()) {
if (blocksBuilder_.isEmpty()) {
blocksBuilder_.dispose();
blocksBuilder_ = null;
blocks_ = other.blocks_;
bitField0_ = (bitField0_ & ~0x00000002);
blocksBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getBlocksFieldBuilder() : null;
} else {
blocksBuilder_.addAllMessages(other.blocks_);
}
}
}
if (other.hasUnderConstruction()) {
setUnderConstruction(other.getUnderConstruction());
}
if (other.hasLastBlock()) {
mergeLastBlock(other.getLastBlock());
}
if (other.hasIsLastBlockComplete()) {
setIsLastBlockComplete(other.getIsLastBlockComplete());
}
if (other.hasFileEncryptionInfo()) {
mergeFileEncryptionInfo(other.getFileEncryptionInfo());
}
if (other.hasEcPolicy()) {
mergeEcPolicy(other.getEcPolicy());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasFileLength()) {
return false;
}
if (!hasUnderConstruction()) {
return false;
}
if (!hasIsLastBlockComplete()) {
return false;
}
for (int i = 0; i < getBlocksCount(); i++) {
if (!getBlocks(i).isInitialized()) {
return false;
}
}
if (hasLastBlock()) {
if (!getLastBlock().isInitialized()) {
return false;
}
}
if (hasFileEncryptionInfo()) {
if (!getFileEncryptionInfo().isInitialized()) {
return false;
}
}
if (hasEcPolicy()) {
if (!getEcPolicy().isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required uint64 fileLength = 1;
private long fileLength_ ;
/**
* required uint64 fileLength = 1;
*/
public boolean hasFileLength() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint64 fileLength = 1;
*/
public long getFileLength() {
return fileLength_;
}
/**
* required uint64 fileLength = 1;
*/
public Builder setFileLength(long value) {
bitField0_ |= 0x00000001;
fileLength_ = value;
onChanged();
return this;
}
/**
* required uint64 fileLength = 1;
*/
public Builder clearFileLength() {
bitField0_ = (bitField0_ & ~0x00000001);
fileLength_ = 0L;
onChanged();
return this;
}
// repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
private java.util.List blocks_ =
java.util.Collections.emptyList();
private void ensureBlocksIsMutable() {
if (!((bitField0_ & 0x00000002) == 0x00000002)) {
blocks_ = new java.util.ArrayList(blocks_);
bitField0_ |= 0x00000002;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> blocksBuilder_;
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public java.util.List getBlocksList() {
if (blocksBuilder_ == null) {
return java.util.Collections.unmodifiableList(blocks_);
} else {
return blocksBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public int getBlocksCount() {
if (blocksBuilder_ == null) {
return blocks_.size();
} else {
return blocksBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index) {
if (blocksBuilder_ == null) {
return blocks_.get(index);
} else {
return blocksBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public Builder setBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.set(index, value);
onChanged();
} else {
blocksBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public Builder setBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.set(index, builderForValue.build());
onChanged();
} else {
blocksBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.add(value);
onChanged();
} else {
blocksBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public Builder addBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
if (blocksBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlocksIsMutable();
blocks_.add(index, value);
onChanged();
} else {
blocksBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public Builder addBlocks(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.add(builderForValue.build());
onChanged();
} else {
blocksBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public Builder addBlocks(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.add(index, builderForValue.build());
onChanged();
} else {
blocksBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public Builder addAllBlocks(
java.lang.Iterable extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> values) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
super.addAll(values, blocks_);
onChanged();
} else {
blocksBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public Builder clearBlocks() {
if (blocksBuilder_ == null) {
blocks_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
} else {
blocksBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public Builder removeBlocks(int index) {
if (blocksBuilder_ == null) {
ensureBlocksIsMutable();
blocks_.remove(index);
onChanged();
} else {
blocksBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getBlocksBuilder(
int index) {
return getBlocksFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder(
int index) {
if (blocksBuilder_ == null) {
return blocks_.get(index); } else {
return blocksBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public java.util.List extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>
getBlocksOrBuilderList() {
if (blocksBuilder_ != null) {
return blocksBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(blocks_);
}
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder addBlocksBuilder() {
return getBlocksFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder addBlocksBuilder(
int index) {
return getBlocksFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;
*/
public java.util.List
getBlocksBuilderList() {
return getBlocksFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>
getBlocksFieldBuilder() {
if (blocksBuilder_ == null) {
blocksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>(
blocks_,
((bitField0_ & 0x00000002) == 0x00000002),
getParentForChildren(),
isClean());
blocks_ = null;
}
return blocksBuilder_;
}
// required bool underConstruction = 3;
private boolean underConstruction_ ;
/**
* required bool underConstruction = 3;
*/
public boolean hasUnderConstruction() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required bool underConstruction = 3;
*/
public boolean getUnderConstruction() {
return underConstruction_;
}
/**
* required bool underConstruction = 3;
*/
public Builder setUnderConstruction(boolean value) {
bitField0_ |= 0x00000004;
underConstruction_ = value;
onChanged();
return this;
}
/**
* required bool underConstruction = 3;
*/
public Builder clearUnderConstruction() {
bitField0_ = (bitField0_ & ~0x00000004);
underConstruction_ = false;
onChanged();
return this;
}
// optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto lastBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> lastBlockBuilder_;
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
public boolean hasLastBlock() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getLastBlock() {
if (lastBlockBuilder_ == null) {
return lastBlock_;
} else {
return lastBlockBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
public Builder setLastBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
if (lastBlockBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
lastBlock_ = value;
onChanged();
} else {
lastBlockBuilder_.setMessage(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
public Builder setLastBlock(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) {
if (lastBlockBuilder_ == null) {
lastBlock_ = builderForValue.build();
onChanged();
} else {
lastBlockBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
public Builder mergeLastBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
if (lastBlockBuilder_ == null) {
if (((bitField0_ & 0x00000008) == 0x00000008) &&
lastBlock_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()) {
lastBlock_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder(lastBlock_).mergeFrom(value).buildPartial();
} else {
lastBlock_ = value;
}
onChanged();
} else {
lastBlockBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
public Builder clearLastBlock() {
if (lastBlockBuilder_ == null) {
lastBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance();
onChanged();
} else {
lastBlockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getLastBlockBuilder() {
bitField0_ |= 0x00000008;
onChanged();
return getLastBlockFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getLastBlockOrBuilder() {
if (lastBlockBuilder_ != null) {
return lastBlockBuilder_.getMessageOrBuilder();
} else {
return lastBlock_;
}
}
/**
* optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>
getLastBlockFieldBuilder() {
if (lastBlockBuilder_ == null) {
lastBlockBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>(
lastBlock_,
getParentForChildren(),
isClean());
lastBlock_ = null;
}
return lastBlockBuilder_;
}
// required bool isLastBlockComplete = 5;
private boolean isLastBlockComplete_ ;
/**
* required bool isLastBlockComplete = 5;
*/
public boolean hasIsLastBlockComplete() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required bool isLastBlockComplete = 5;
*/
public boolean getIsLastBlockComplete() {
return isLastBlockComplete_;
}
/**
* required bool isLastBlockComplete = 5;
*/
public Builder setIsLastBlockComplete(boolean value) {
bitField0_ |= 0x00000010;
isLastBlockComplete_ = value;
onChanged();
return this;
}
/**
* required bool isLastBlockComplete = 5;
*/
public Builder clearIsLastBlockComplete() {
bitField0_ = (bitField0_ & ~0x00000010);
isLastBlockComplete_ = false;
onChanged();
return this;
}
// optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto fileEncryptionInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder> fileEncryptionInfoBuilder_;
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
public boolean hasFileEncryptionInfo() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo() {
if (fileEncryptionInfoBuilder_ == null) {
return fileEncryptionInfo_;
} else {
return fileEncryptionInfoBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
public Builder setFileEncryptionInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto value) {
if (fileEncryptionInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
fileEncryptionInfo_ = value;
onChanged();
} else {
fileEncryptionInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000020;
return this;
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
public Builder setFileEncryptionInfo(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder builderForValue) {
if (fileEncryptionInfoBuilder_ == null) {
fileEncryptionInfo_ = builderForValue.build();
onChanged();
} else {
fileEncryptionInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000020;
return this;
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
public Builder mergeFileEncryptionInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto value) {
if (fileEncryptionInfoBuilder_ == null) {
if (((bitField0_ & 0x00000020) == 0x00000020) &&
fileEncryptionInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance()) {
fileEncryptionInfo_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.newBuilder(fileEncryptionInfo_).mergeFrom(value).buildPartial();
} else {
fileEncryptionInfo_ = value;
}
onChanged();
} else {
fileEncryptionInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000020;
return this;
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
public Builder clearFileEncryptionInfo() {
if (fileEncryptionInfoBuilder_ == null) {
fileEncryptionInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance();
onChanged();
} else {
fileEncryptionInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000020);
return this;
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder getFileEncryptionInfoBuilder() {
bitField0_ |= 0x00000020;
onChanged();
return getFileEncryptionInfoFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder() {
if (fileEncryptionInfoBuilder_ != null) {
return fileEncryptionInfoBuilder_.getMessageOrBuilder();
} else {
return fileEncryptionInfo_;
}
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder>
getFileEncryptionInfoFieldBuilder() {
if (fileEncryptionInfoBuilder_ == null) {
fileEncryptionInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder>(
fileEncryptionInfo_,
getParentForChildren(),
isClean());
fileEncryptionInfo_ = null;
}
return fileEncryptionInfoBuilder_;
}
// optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto ecPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> ecPolicyBuilder_;
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;
*
*
* Optional field for erasure coding
*
*/
public boolean hasEcPolicy() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;
*
*
* Optional field for erasure coding
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy() {
if (ecPolicyBuilder_ == null) {
return ecPolicy_;
} else {
return ecPolicyBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;
*
*
* Optional field for erasure coding
*
*/
public Builder setEcPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) {
if (ecPolicyBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ecPolicy_ = value;
onChanged();
} else {
ecPolicyBuilder_.setMessage(value);
}
bitField0_ |= 0x00000040;
return this;
}
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;
*
*
* Optional field for erasure coding
*
*/
public Builder setEcPolicy(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder builderForValue) {
if (ecPolicyBuilder_ == null) {
ecPolicy_ = builderForValue.build();
onChanged();
} else {
ecPolicyBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000040;
return this;
}
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;
*
*
* Optional field for erasure coding
*
*/
public Builder mergeEcPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) {
if (ecPolicyBuilder_ == null) {
if (((bitField0_ & 0x00000040) == 0x00000040) &&
ecPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance()) {
ecPolicy_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.newBuilder(ecPolicy_).mergeFrom(value).buildPartial();
} else {
ecPolicy_ = value;
}
onChanged();
} else {
ecPolicyBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000040;
return this;
}
/**
 * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;
 *
 * Optional field for erasure coding.
 *
 * Resets ecPolicy to its default instance and drops the "has ecPolicy"
 * presence bit.
 */
public Builder clearEcPolicy() {
  if (ecPolicyBuilder_ != null) {
    ecPolicyBuilder_.clear();
  } else {
    ecPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance();
    onChanged();
  }
  bitField0_ &= ~0x00000040;
  return this;
}
/**
 * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;
 *
 * Optional field for erasure coding.
 *
 * Returns a mutable sub-builder for ecPolicy, creating the lazy field
 * builder on first use. Calling this marks the field present, since the
 * caller is assumed to be about to populate it.
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder getEcPolicyBuilder() {
  bitField0_ |= 0x00000040;
  onChanged();
  return getEcPolicyFieldBuilder().getBuilder();
}
/**
 * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;
 *
 * Optional field for erasure coding.
 *
 * Read-only view of ecPolicy: the sub-builder's live view when one exists,
 * the stored message otherwise. Never forces builder creation.
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder() {
  if (ecPolicyBuilder_ == null) {
    return ecPolicy_;
  }
  return ecPolicyBuilder_.getMessageOrBuilder();
}
/**
 * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;
 *
 * Optional field for erasure coding.
 *
 * Lazily creates the SingleFieldBuilder that manages ecPolicy. Once the
 * builder exists it owns the message, so the plain field is nulled out:
 * the invariant is that exactly one of {@code ecPolicy_} /
 * {@code ecPolicyBuilder_} is authoritative at any time.
 */
private com.google.protobuf.SingleFieldBuilder<
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder>
    getEcPolicyFieldBuilder() {
  if (ecPolicyBuilder_ == null) {
    ecPolicyBuilder_ = new com.google.protobuf.SingleFieldBuilder<
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder>(
            ecPolicy_,
            getParentForChildren(),
            isClean());
    ecPolicy_ = null;  // builder now owns the value
  }
  return ecPolicyBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.LocatedBlocksProto)
}
static {
defaultInstance = new LocatedBlocksProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.LocatedBlocksProto)
}
/**
 * Accessors for a single key/value entry of an erasure-coding schema's
 * options map ({@code hadoop.hdfs.ECSchemaOptionEntryProto}). Both fields
 * are required; each string is exposed both as a decoded String and as its
 * raw UTF-8 ByteString.
 */
public interface ECSchemaOptionEntryProtoOrBuilder
    extends com.google.protobuf.MessageOrBuilder {

  // required string key = 1;
  /**
   * required string key = 1;
   */
  boolean hasKey();
  /**
   * required string key = 1;
   */
  java.lang.String getKey();
  /**
   * required string key = 1; (raw UTF-8 bytes)
   */
  com.google.protobuf.ByteString
      getKeyBytes();

  // required string value = 2;
  /**
   * required string value = 2;
   */
  boolean hasValue();
  /**
   * required string value = 2;
   */
  java.lang.String getValue();
  /**
   * required string value = 2; (raw UTF-8 bytes)
   */
  com.google.protobuf.ByteString
      getValueBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.ECSchemaOptionEntryProto}
*
*
**
* ECSchema options entry
*
*/
public static final class ECSchemaOptionEntryProto extends
com.google.protobuf.GeneratedMessage
implements ECSchemaOptionEntryProtoOrBuilder {
// Use ECSchemaOptionEntryProto.newBuilder() to construct.
private ECSchemaOptionEntryProto(com.google.protobuf.GeneratedMessage.Builder> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ECSchemaOptionEntryProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ECSchemaOptionEntryProto defaultInstance;
public static ECSchemaOptionEntryProto getDefaultInstance() {
return defaultInstance;
}
public ECSchemaOptionEntryProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ECSchemaOptionEntryProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
key_ = input.readBytes();
break;
}
case 18: {
bitField0_ |= 0x00000002;
value_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder.class);
}
public static com.google.protobuf.Parser PARSER =
new com.google.protobuf.AbstractParser() {
public ECSchemaOptionEntryProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ECSchemaOptionEntryProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser getParserForType() {
return PARSER;
}
private int bitField0_;
// required string key = 1;
public static final int KEY_FIELD_NUMBER = 1;
private java.lang.Object key_;
/**
* required string key = 1;
*/
public boolean hasKey() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string key = 1;
*/
public java.lang.String getKey() {
java.lang.Object ref = key_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
key_ = s;
}
return s;
}
}
/**
* required string key = 1;
*/
public com.google.protobuf.ByteString
getKeyBytes() {
java.lang.Object ref = key_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
key_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required string value = 2;
public static final int VALUE_FIELD_NUMBER = 2;
private java.lang.Object value_;
/**
* required string value = 2;
*/
public boolean hasValue() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string value = 2;
*/
public java.lang.String getValue() {
java.lang.Object ref = value_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
value_ = s;
}
return s;
}
}
/**
* required string value = 2;
*/
public com.google.protobuf.ByteString
getValueBytes() {
java.lang.Object ref = value_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
value_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
key_ = "";
value_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasKey()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasValue()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getKeyBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getValueBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getKeyBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getValueBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto) obj;
boolean result = true;
result = result && (hasKey() == other.hasKey());
if (hasKey()) {
result = result && getKey()
.equals(other.getKey());
}
result = result && (hasValue() == other.hasValue());
if (hasValue()) {
result = result && getValue()
.equals(other.getValue());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasKey()) {
hash = (37 * hash) + KEY_FIELD_NUMBER;
hash = (53 * hash) + getKey().hashCode();
}
if (hasValue()) {
hash = (37 * hash) + VALUE_FIELD_NUMBER;
hash = (53 * hash) + getValue().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ECSchemaOptionEntryProto}
*
*
**
* ECSchema options entry
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
key_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
value_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.key_ = key_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.value_ = value_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.getDefaultInstance()) return this;
if (other.hasKey()) {
bitField0_ |= 0x00000001;
key_ = other.key_;
onChanged();
}
if (other.hasValue()) {
bitField0_ |= 0x00000002;
value_ = other.value_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasKey()) {
return false;
}
if (!hasValue()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string key = 1;
private java.lang.Object key_ = "";
/**
* required string key = 1;
*/
public boolean hasKey() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string key = 1;
*/
public java.lang.String getKey() {
java.lang.Object ref = key_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
key_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string key = 1;
*/
public com.google.protobuf.ByteString
getKeyBytes() {
java.lang.Object ref = key_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
key_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string key = 1;
*/
public Builder setKey(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
key_ = value;
onChanged();
return this;
}
/**
* required string key = 1;
*/
public Builder clearKey() {
bitField0_ = (bitField0_ & ~0x00000001);
key_ = getDefaultInstance().getKey();
onChanged();
return this;
}
/**
* required string key = 1;
*/
public Builder setKeyBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
key_ = value;
onChanged();
return this;
}
// required string value = 2;
private java.lang.Object value_ = "";
/**
* required string value = 2;
*/
public boolean hasValue() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string value = 2;
*/
public java.lang.String getValue() {
java.lang.Object ref = value_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
value_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string value = 2;
*/
public com.google.protobuf.ByteString
getValueBytes() {
java.lang.Object ref = value_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
value_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string value = 2;
*/
public Builder setValue(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
value_ = value;
onChanged();
return this;
}
/**
* required string value = 2;
*/
public Builder clearValue() {
bitField0_ = (bitField0_ & ~0x00000002);
value_ = getDefaultInstance().getValue();
onChanged();
return this;
}
/**
* required string value = 2;
*/
public Builder setValueBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
value_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ECSchemaOptionEntryProto)
}
static {
defaultInstance = new ECSchemaOptionEntryProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ECSchemaOptionEntryProto)
}
public interface ECSchemaProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required string codecName = 1;
/**
* required string codecName = 1;
*/
boolean hasCodecName();
/**
* required string codecName = 1;
*/
java.lang.String getCodecName();
/**
* required string codecName = 1;
*/
com.google.protobuf.ByteString
getCodecNameBytes();
// required uint32 dataUnits = 2;
/**
* required uint32 dataUnits = 2;
*/
boolean hasDataUnits();
/**
* required uint32 dataUnits = 2;
*/
int getDataUnits();
// required uint32 parityUnits = 3;
/**
* required uint32 parityUnits = 3;
*/
boolean hasParityUnits();
/**
* required uint32 parityUnits = 3;
*/
int getParityUnits();
// repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
/**
* repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
*/
java.util.List
getOptionsList();
/**
* repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto getOptions(int index);
/**
* repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
*/
int getOptionsCount();
/**
* repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
*/
java.util.List extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder>
getOptionsOrBuilderList();
/**
* repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder getOptionsOrBuilder(
int index);
}
/**
* Protobuf type {@code hadoop.hdfs.ECSchemaProto}
*
*
**
* ECSchema for erasurecoding
*
*/
public static final class ECSchemaProto extends
com.google.protobuf.GeneratedMessage
implements ECSchemaProtoOrBuilder {
// Use ECSchemaProto.newBuilder() to construct.
// NOTE(review): restored the stripped wildcard type argument — the scraped
// source read "GeneratedMessage.Builder> builder", which is a syntax error.
private ECSchemaProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
  super(builder);
  this.unknownFields = builder.getUnknownFields();
}
// Internal no-init constructor used only to create the default instance.
private ECSchemaProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
// Singleton default instance, assigned in the class's static initializer.
private static final ECSchemaProto defaultInstance;
public static ECSchemaProto getDefaultInstance() {
  return defaultInstance;
}
public ECSchemaProto getDefaultInstanceForType() {
  return defaultInstance;
}
// Fields that were present on the wire but not recognized by this schema;
// preserved so round-tripping does not lose data.
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
    getUnknownFields() {
  return this.unknownFields;
}
/**
 * Parsing constructor: consumes the stream tag by tag. Unrecognized fields
 * are preserved in {@code unknownFields}. The repeated {@code options} list
 * is accumulated mutably (tracked via {@code mutable_bitField0_}) and frozen
 * to an unmodifiable list in the finally block.
 *
 * NOTE(review): restored the element type on the ArrayList allocation; the
 * scraped source had the generic argument stripped.
 */
private ECSchemaProto(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  initFields();
  int mutable_bitField0_ = 0;
  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
      com.google.protobuf.UnknownFieldSet.newBuilder();
  try {
    boolean done = false;
    while (!done) {
      int tag = input.readTag();
      // Every arm ends in break; "default" appearing before the numbered
      // cases has no effect on dispatch.
      switch (tag) {
        case 0:
          done = true;
          break;
        default: {
          if (!parseUnknownField(input, unknownFields,
                                 extensionRegistry, tag)) {
            done = true;
          }
          break;
        }
        case 10: {
          bitField0_ |= 0x00000001;
          codecName_ = input.readBytes();
          break;
        }
        case 16: {
          bitField0_ |= 0x00000002;
          dataUnits_ = input.readUInt32();
          break;
        }
        case 24: {
          bitField0_ |= 0x00000004;
          parityUnits_ = input.readUInt32();
          break;
        }
        case 34: {
          if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
            options_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto>();
            mutable_bitField0_ |= 0x00000008;
          }
          options_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.PARSER, extensionRegistry));
          break;
        }
      }
    }
  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
    throw e.setUnfinishedMessage(this);
  } catch (java.io.IOException e) {
    throw new com.google.protobuf.InvalidProtocolBufferException(
        e.getMessage()).setUnfinishedMessage(this);
  } finally {
    if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
      options_ = java.util.Collections.unmodifiableList(options_);
    }
    this.unknownFields = unknownFields.build();
    makeExtensionsImmutable();
  }
}
// Descriptor for hadoop.hdfs.ECSchemaProto, owned by the enclosing HdfsProtos file descriptor.
public static final com.google.protobuf.Descriptors.Descriptor
    getDescriptor() {
  return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaProto_descriptor;
}
// Reflection support: maps proto field numbers to the generated accessors.
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
    internalGetFieldAccessorTable() {
  return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaProto_fieldAccessorTable
      .ensureFieldAccessorsInitialized(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder.class);
}
/**
 * Parser instance for ECSchemaProto.
 *
 * NOTE(review): restored the {@code <ECSchemaProto>} type arguments on
 * Parser/AbstractParser; the scraped source had them stripped, leaving raw
 * types whose anonymous-class override would not compile.
 */
public static com.google.protobuf.Parser<ECSchemaProto> PARSER =
    new com.google.protobuf.AbstractParser<ECSchemaProto>() {
  public ECSchemaProto parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return new ECSchemaProto(input, extensionRegistry);
  }
};
/** Returns the message-specific parser (generic return type restored). */
@java.lang.Override
public com.google.protobuf.Parser<ECSchemaProto> getParserForType() {
  return PARSER;
}
// Presence bits: 0x1 = codecName, 0x2 = dataUnits, 0x4 = parityUnits.
private int bitField0_;
// required string codecName = 1;
public static final int CODECNAME_FIELD_NUMBER = 1;
// Holds either a String or a ByteString (lazy UTF-8 decode).
private java.lang.Object codecName_;
/**
 * required string codecName = 1;
 */
public boolean hasCodecName() {
  return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
 * required string codecName = 1;
 *
 * Returns the codec name, lazily decoding the stored ByteString. The
 * decoded String is cached back into {@code codecName_} only when the bytes
 * are valid UTF-8, mirroring protobuf's standard lazy-decode idiom.
 */
public java.lang.String getCodecName() {
  final java.lang.Object ref = codecName_;
  if (ref instanceof java.lang.String) {
    return (java.lang.String) ref;
  }
  final com.google.protobuf.ByteString bytes = (com.google.protobuf.ByteString) ref;
  final java.lang.String decoded = bytes.toStringUtf8();
  if (bytes.isValidUtf8()) {
    codecName_ = decoded;
  }
  return decoded;
}
/**
 * required string codecName = 1;
 *
 * Returns the codec name as a UTF-8 ByteString, caching the encoded form
 * back into {@code codecName_} when the field currently holds a String.
 */
public com.google.protobuf.ByteString
    getCodecNameBytes() {
  final java.lang.Object ref = codecName_;
  if (!(ref instanceof java.lang.String)) {
    return (com.google.protobuf.ByteString) ref;
  }
  final com.google.protobuf.ByteString encoded =
      com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
  codecName_ = encoded;
  return encoded;
}
// required uint32 dataUnits = 2;
public static final int DATAUNITS_FIELD_NUMBER = 2;
// Number of data units in the EC schema (uint32 on the wire).
private int dataUnits_;
/**
 * required uint32 dataUnits = 2;
 */
public boolean hasDataUnits() {
  return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
 * required uint32 dataUnits = 2;
 */
public int getDataUnits() {
  return dataUnits_;
}
// required uint32 parityUnits = 3;
public static final int PARITYUNITS_FIELD_NUMBER = 3;
// Number of parity units in the EC schema (uint32 on the wire).
private int parityUnits_;
/**
 * required uint32 parityUnits = 3;
 */
public boolean hasParityUnits() {
  return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
 * required uint32 parityUnits = 3;
 */
public int getParityUnits() {
  return parityUnits_;
}
// repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
public static final int OPTIONS_FIELD_NUMBER = 4;
private java.util.List options_;
/**
* repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
*/
public java.util.List getOptionsList() {
return options_;
}
/**
* repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
*/
public java.util.List extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder>
getOptionsOrBuilderList() {
return options_;
}
/**
* repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
*/
public int getOptionsCount() {
return options_.size();
}
/**
* repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto getOptions(int index) {
return options_.get(index);
}
/**
* repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder getOptionsOrBuilder(
int index) {
return options_.get(index);
}
// Resets every field to its proto default; called before parsing.
private void initFields() {
  codecName_ = "";
  dataUnits_ = 0;
  parityUnits_ = 0;
  options_ = java.util.Collections.emptyList();
}
// Memoized initialization state: -1 = unknown, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
/**
 * True when all required fields (codecName, dataUnits, parityUnits) are set
 * and every nested options entry is itself initialized. Result is memoized.
 */
public final boolean isInitialized() {
  byte isInitialized = memoizedIsInitialized;
  if (isInitialized != -1) return isInitialized == 1;

  if (!hasCodecName()) {
    memoizedIsInitialized = 0;
    return false;
  }
  if (!hasDataUnits()) {
    memoizedIsInitialized = 0;
    return false;
  }
  if (!hasParityUnits()) {
    memoizedIsInitialized = 0;
    return false;
  }
  for (int i = 0; i < getOptionsCount(); i++) {
    if (!getOptions(i).isInitialized()) {
      memoizedIsInitialized = 0;
      return false;
    }
  }
  memoizedIsInitialized = 1;
  return true;
}
/**
 * Serializes the set fields (tag order 1..4) followed by any unknown
 * fields. getSerializedSize() is invoked first so cached sizes of nested
 * messages are up to date before writing.
 */
public void writeTo(com.google.protobuf.CodedOutputStream output)
    throws java.io.IOException {
  getSerializedSize();
  if (((bitField0_ & 0x00000001) == 0x00000001)) {
    output.writeBytes(1, getCodecNameBytes());
  }
  if (((bitField0_ & 0x00000002) == 0x00000002)) {
    output.writeUInt32(2, dataUnits_);
  }
  if (((bitField0_ & 0x00000004) == 0x00000004)) {
    output.writeUInt32(3, parityUnits_);
  }
  for (int i = 0; i < options_.size(); i++) {
    output.writeMessage(4, options_.get(i));
  }
  getUnknownFields().writeTo(output);
}
// Cached wire size; -1 means not yet computed.
private int memoizedSerializedSize = -1;
/**
 * Computes (and memoizes) the serialized byte size of the message,
 * including unknown fields. Safe because the message is immutable.
 */
public int getSerializedSize() {
  int size = memoizedSerializedSize;
  if (size != -1) return size;

  size = 0;
  if (((bitField0_ & 0x00000001) == 0x00000001)) {
    size += com.google.protobuf.CodedOutputStream
        .computeBytesSize(1, getCodecNameBytes());
  }
  if (((bitField0_ & 0x00000002) == 0x00000002)) {
    size += com.google.protobuf.CodedOutputStream
        .computeUInt32Size(2, dataUnits_);
  }
  if (((bitField0_ & 0x00000004) == 0x00000004)) {
    size += com.google.protobuf.CodedOutputStream
        .computeUInt32Size(3, parityUnits_);
  }
  for (int i = 0; i < options_.size(); i++) {
    size += com.google.protobuf.CodedOutputStream
        .computeMessageSize(4, options_.get(i));
  }
  size += getUnknownFields().getSerializedSize();
  memoizedSerializedSize = size;
  return size;
}
private static final long serialVersionUID = 0L;
// Java serialization hook: delegates to GeneratedMessage's replacement form.
@java.lang.Override
protected java.lang.Object writeReplace()
    throws java.io.ObjectStreamException {
  return super.writeReplace();
}
/**
 * Structural equality: same presence, same field values, same repeated
 * options list, and equal unknown-field sets. The {@code result && ...}
 * chaining short-circuits on the first mismatch.
 */
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto)) {
    return super.equals(obj);
  }
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto) obj;

  boolean result = true;
  result = result && (hasCodecName() == other.hasCodecName());
  if (hasCodecName()) {
    result = result && getCodecName()
        .equals(other.getCodecName());
  }
  result = result && (hasDataUnits() == other.hasDataUnits());
  if (hasDataUnits()) {
    result = result && (getDataUnits()
        == other.getDataUnits());
  }
  result = result && (hasParityUnits() == other.hasParityUnits());
  if (hasParityUnits()) {
    result = result && (getParityUnits()
        == other.getParityUnits());
  }
  result = result && getOptionsList()
      .equals(other.getOptionsList());
  result = result &&
      getUnknownFields().equals(other.getUnknownFields());
  return result;
}
// Cached hash; 0 doubles as the "not yet computed" sentinel.
private int memoizedHashCode = 0;
/**
 * Hash consistent with equals(): folds in the descriptor, each present
 * field (tag number then value), the options list, and unknown fields.
 */
@java.lang.Override
public int hashCode() {
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  int hash = 41;
  hash = (19 * hash) + getDescriptorForType().hashCode();
  if (hasCodecName()) {
    hash = (37 * hash) + CODECNAME_FIELD_NUMBER;
    hash = (53 * hash) + getCodecName().hashCode();
  }
  if (hasDataUnits()) {
    hash = (37 * hash) + DATAUNITS_FIELD_NUMBER;
    hash = (53 * hash) + getDataUnits();
  }
  if (hasParityUnits()) {
    hash = (37 * hash) + PARITYUNITS_FIELD_NUMBER;
    hash = (53 * hash) + getParityUnits();
  }
  if (getOptionsCount() > 0) {
    hash = (37 * hash) + OPTIONS_FIELD_NUMBER;
    hash = (53 * hash) + getOptionsList().hashCode();
  }
  hash = (29 * hash) + getUnknownFields().hashCode();
  memoizedHashCode = hash;
  return hash;
}
// Static parse helpers; all delegate to PARSER. The ByteString/byte[] overloads
// throw InvalidProtocolBufferException on malformed input; the stream overloads
// additionally propagate IOException from the underlying stream.
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Delimited variants read a leading varint length prefix before the message.
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Builder factory methods. toBuilder() seeds a new builder with this
// message's current field values via mergeFrom.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ECSchemaProto}
*
*
**
* ECSchema for erasurecoding
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getOptionsFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
codecName_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
dataUnits_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
parityUnits_ = 0;
bitField0_ = (bitField0_ & ~0x00000004);
if (optionsBuilder_ == null) {
options_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000008);
} else {
optionsBuilder_.clear();
}
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.codecName_ = codecName_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.dataUnits_ = dataUnits_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.parityUnits_ = parityUnits_;
if (optionsBuilder_ == null) {
if (((bitField0_ & 0x00000008) == 0x00000008)) {
options_ = java.util.Collections.unmodifiableList(options_);
bitField0_ = (bitField0_ & ~0x00000008);
}
result.options_ = options_;
} else {
result.options_ = optionsBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.getDefaultInstance()) return this;
if (other.hasCodecName()) {
bitField0_ |= 0x00000001;
codecName_ = other.codecName_;
onChanged();
}
if (other.hasDataUnits()) {
setDataUnits(other.getDataUnits());
}
if (other.hasParityUnits()) {
setParityUnits(other.getParityUnits());
}
if (optionsBuilder_ == null) {
if (!other.options_.isEmpty()) {
if (options_.isEmpty()) {
options_ = other.options_;
bitField0_ = (bitField0_ & ~0x00000008);
} else {
ensureOptionsIsMutable();
options_.addAll(other.options_);
}
onChanged();
}
} else {
if (!other.options_.isEmpty()) {
if (optionsBuilder_.isEmpty()) {
optionsBuilder_.dispose();
optionsBuilder_ = null;
options_ = other.options_;
bitField0_ = (bitField0_ & ~0x00000008);
optionsBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getOptionsFieldBuilder() : null;
} else {
optionsBuilder_.addAllMessages(other.options_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasCodecName()) {
return false;
}
if (!hasDataUnits()) {
return false;
}
if (!hasParityUnits()) {
return false;
}
for (int i = 0; i < getOptionsCount(); i++) {
if (!getOptions(i).isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string codecName = 1;
private java.lang.Object codecName_ = "";
/**
* required string codecName = 1;
*/
public boolean hasCodecName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string codecName = 1;
*/
public java.lang.String getCodecName() {
java.lang.Object ref = codecName_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
codecName_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string codecName = 1;
*/
public com.google.protobuf.ByteString
getCodecNameBytes() {
java.lang.Object ref = codecName_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
codecName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string codecName = 1;
*/
public Builder setCodecName(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
codecName_ = value;
onChanged();
return this;
}
/**
* required string codecName = 1;
*/
public Builder clearCodecName() {
bitField0_ = (bitField0_ & ~0x00000001);
codecName_ = getDefaultInstance().getCodecName();
onChanged();
return this;
}
/**
* required string codecName = 1;
*/
public Builder setCodecNameBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
codecName_ = value;
onChanged();
return this;
}
// required uint32 dataUnits = 2;
private int dataUnits_ ;
/**
* required uint32 dataUnits = 2;
*/
public boolean hasDataUnits() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint32 dataUnits = 2;
*/
public int getDataUnits() {
return dataUnits_;
}
/**
* required uint32 dataUnits = 2;
*/
public Builder setDataUnits(int value) {
bitField0_ |= 0x00000002;
dataUnits_ = value;
onChanged();
return this;
}
/**
* required uint32 dataUnits = 2;
*/
public Builder clearDataUnits() {
bitField0_ = (bitField0_ & ~0x00000002);
dataUnits_ = 0;
onChanged();
return this;
}
// required uint32 parityUnits = 3;
private int parityUnits_ ;
/**
* required uint32 parityUnits = 3;
*/
public boolean hasParityUnits() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint32 parityUnits = 3;
*/
public int getParityUnits() {
return parityUnits_;
}
/**
* required uint32 parityUnits = 3;
*/
public Builder setParityUnits(int value) {
bitField0_ |= 0x00000004;
parityUnits_ = value;
onChanged();
return this;
}
/**
* required uint32 parityUnits = 3;
*/
public Builder clearParityUnits() {
bitField0_ = (bitField0_ & ~0x00000004);
parityUnits_ = 0;
onChanged();
return this;
}
// repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
private java.util.List options_ =
java.util.Collections.emptyList();
private void ensureOptionsIsMutable() {
if (!((bitField0_ & 0x00000008) == 0x00000008)) {
options_ = new java.util.ArrayList(options_);
bitField0_ |= 0x00000008;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder> optionsBuilder_;
/**
* repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
*/
public java.util.List getOptionsList() {
if (optionsBuilder_ == null) {
return java.util.Collections.unmodifiableList(options_);
} else {
return optionsBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
*/
public int getOptionsCount() {
if (optionsBuilder_ == null) {
return options_.size();
} else {
return optionsBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto getOptions(int index) {
if (optionsBuilder_ == null) {
return options_.get(index);
} else {
return optionsBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
*/
public Builder setOptions(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto value) {
if (optionsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureOptionsIsMutable();
options_.set(index, value);
onChanged();
} else {
optionsBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
*/
public Builder setOptions(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder builderForValue) {
if (optionsBuilder_ == null) {
ensureOptionsIsMutable();
options_.set(index, builderForValue.build());
onChanged();
} else {
optionsBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
*/
public Builder addOptions(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto value) {
if (optionsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureOptionsIsMutable();
options_.add(value);
onChanged();
} else {
optionsBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
*/
public Builder addOptions(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto value) {
if (optionsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureOptionsIsMutable();
options_.add(index, value);
onChanged();
} else {
optionsBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
*/
public Builder addOptions(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder builderForValue) {
if (optionsBuilder_ == null) {
ensureOptionsIsMutable();
options_.add(builderForValue.build());
onChanged();
} else {
optionsBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
*/
public Builder addOptions(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder builderForValue) {
if (optionsBuilder_ == null) {
ensureOptionsIsMutable();
options_.add(index, builderForValue.build());
onChanged();
} else {
optionsBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
*/
public Builder addAllOptions(
java.lang.Iterable extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto> values) {
if (optionsBuilder_ == null) {
ensureOptionsIsMutable();
super.addAll(values, options_);
onChanged();
} else {
optionsBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
*/
public Builder clearOptions() {
if (optionsBuilder_ == null) {
options_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000008);
onChanged();
} else {
optionsBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
*/
public Builder removeOptions(int index) {
if (optionsBuilder_ == null) {
ensureOptionsIsMutable();
options_.remove(index);
onChanged();
} else {
optionsBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder getOptionsBuilder(
int index) {
return getOptionsFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder getOptionsOrBuilder(
int index) {
if (optionsBuilder_ == null) {
return options_.get(index); } else {
return optionsBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
*/
public java.util.List extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder>
getOptionsOrBuilderList() {
if (optionsBuilder_ != null) {
return optionsBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(options_);
}
}
/**
* repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder addOptionsBuilder() {
return getOptionsFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder addOptionsBuilder(
int index) {
return getOptionsFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;
*/
public java.util.List
getOptionsBuilderList() {
return getOptionsFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder>
getOptionsFieldBuilder() {
if (optionsBuilder_ == null) {
optionsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder>(
options_,
((bitField0_ & 0x00000008) == 0x00000008),
getParentForChildren(),
isClean());
options_ = null;
}
return optionsBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ECSchemaProto)
}
// Eagerly create this type's default instance at class-load time.
static {
defaultInstance = new ECSchemaProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ECSchemaProto)
}
// Read-only accessor interface implemented by both ErasureCodingPolicyProto
// and its Builder; one has*/get* pair per proto field.
public interface ErasureCodingPolicyProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// optional string name = 1;
/**
* optional string name = 1;
*/
boolean hasName();
/**
* optional string name = 1;
*/
java.lang.String getName();
/**
* optional string name = 1;
*/
com.google.protobuf.ByteString
getNameBytes();
// optional .hadoop.hdfs.ECSchemaProto schema = 2;
/**
* optional .hadoop.hdfs.ECSchemaProto schema = 2;
*/
boolean hasSchema();
/**
* optional .hadoop.hdfs.ECSchemaProto schema = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto getSchema();
/**
* optional .hadoop.hdfs.ECSchemaProto schema = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProtoOrBuilder getSchemaOrBuilder();
// optional uint32 cellSize = 3;
/**
* optional uint32 cellSize = 3;
*/
boolean hasCellSize();
/**
* optional uint32 cellSize = 3;
*/
int getCellSize();
// required uint32 id = 4;
/**
* required uint32 id = 4;
*
*
* Actually a byte - only 8 bits used
*
*/
boolean hasId();
/**
* required uint32 id = 4;
*
*
* Actually a byte - only 8 bits used
*
*/
int getId();
// optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED];
/**
* optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED];
*/
boolean hasState();
/**
* optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED];
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState getState();
}
/**
* Protobuf type {@code hadoop.hdfs.ErasureCodingPolicyProto}
*/
public static final class ErasureCodingPolicyProto extends
com.google.protobuf.GeneratedMessage
implements ErasureCodingPolicyProtoOrBuilder {
// Use ErasureCodingPolicyProto.newBuilder() to construct.
// NOTE(review): the wildcard type argument of GeneratedMessage.Builder was
// missing in this copy of the generated source (stripped by an HTML export);
// restored to the standard protobuf 2.x pattern.
private ErasureCodingPolicyProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
  super(builder);
  this.unknownFields = builder.getUnknownFields();
}
// Used only for the eagerly-created default instance; carries empty unknown fields.
private ErasureCodingPolicyProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ErasureCodingPolicyProto defaultInstance;
public static ErasureCodingPolicyProto getDefaultInstance() {
  return defaultInstance;
}
public ErasureCodingPolicyProto getDefaultInstanceForType() {
  return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
    getUnknownFields() {
  return this.unknownFields;
}
// Wire-format parsing constructor: reads tag/value pairs until EOF (tag 0),
// tracking field presence in bitField0_ and collecting unrecognized fields.
// (The position of the `default:` label in a Java switch does not affect
// matching, so listing it before the numbered cases is harmless.)
private ErasureCodingPolicyProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
name_ = input.readBytes();
break;
}
case 18: {
// If schema was already seen, merge the new message into the old one
// (last-wins per field inside the submessage), per proto2 semantics.
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
subBuilder = schema_.toBuilder();
}
schema_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(schema_);
schema_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
case 24: {
bitField0_ |= 0x00000004;
cellSize_ = input.readUInt32();
break;
}
case 32: {
bitField0_ |= 0x00000008;
id_ = input.readUInt32();
break;
}
case 40: {
// Unknown enum numbers are preserved as unknown fields rather than dropped.
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(5, rawValue);
} else {
bitField0_ |= 0x00000010;
state_ = value;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
    getDescriptor() {
  return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ErasureCodingPolicyProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
    internalGetFieldAccessorTable() {
  return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ErasureCodingPolicyProto_fieldAccessorTable
      .ensureFieldAccessorsInitialized(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder.class);
}
/**
 * Parser singleton for this message type.
 * NOTE(review): the ErasureCodingPolicyProto type arguments on Parser and
 * AbstractParser were missing in this copy of the generated source (stripped
 * by an HTML export); restored to the standard protobuf 2.x pattern.
 */
public static com.google.protobuf.Parser<ErasureCodingPolicyProto> PARSER =
    new com.google.protobuf.AbstractParser<ErasureCodingPolicyProto>() {
  public ErasureCodingPolicyProto parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    // Delegate to the stream-parsing constructor above.
    return new ErasureCodingPolicyProto(input, extensionRegistry);
  }
};
@java.lang.Override
public com.google.protobuf.Parser<ErasureCodingPolicyProto> getParserForType() {
  return PARSER;
}
// Presence bits: 0x1=name, 0x2=schema, 0x4=cellSize, 0x8=id, 0x10=state.
private int bitField0_;
// optional string name = 1;
public static final int NAME_FIELD_NUMBER = 1;
private java.lang.Object name_;
/**
* optional string name = 1;
*/
public boolean hasName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* optional string name = 1;
*/
public java.lang.String getName() {
java.lang.Object ref = name_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
// Lazily decode the wire ByteString; cache the String only if valid UTF-8.
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
name_ = s;
}
return s;
}
}
/**
* optional string name = 1;
*/
public com.google.protobuf.ByteString
getNameBytes() {
java.lang.Object ref = name_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
name_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// optional .hadoop.hdfs.ECSchemaProto schema = 2;
public static final int SCHEMA_FIELD_NUMBER = 2;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto schema_;
/**
* optional .hadoop.hdfs.ECSchemaProto schema = 2;
*/
public boolean hasSchema() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional .hadoop.hdfs.ECSchemaProto schema = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto getSchema() {
return schema_;
}
/**
* optional .hadoop.hdfs.ECSchemaProto schema = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProtoOrBuilder getSchemaOrBuilder() {
return schema_;
}
// optional uint32 cellSize = 3;
public static final int CELLSIZE_FIELD_NUMBER = 3;
private int cellSize_;
/**
* optional uint32 cellSize = 3;
*/
public boolean hasCellSize() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional uint32 cellSize = 3;
*/
public int getCellSize() {
return cellSize_;
}
// required uint32 id = 4;
public static final int ID_FIELD_NUMBER = 4;
private int id_;
/**
* required uint32 id = 4;
*
*
* Actually a byte - only 8 bits used
*
*/
public boolean hasId() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required uint32 id = 4;
*
*
* Actually a byte - only 8 bits used
*
*/
public int getId() {
return id_;
}
// optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED];
public static final int STATE_FIELD_NUMBER = 5;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState state_;
/**
* optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED];
*/
public boolean hasState() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED];
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState getState() {
return state_;
}
// Sets every field to its proto-declared default (state defaults to ENABLED).
private void initFields() {
name_ = "";
schema_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.getDefaultInstance();
cellSize_ = 0;
id_ = 0;
state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState.ENABLED;
}
// Memoized initialization state: -1 unknown, 0 not initialized, 1 initialized.
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
  byte memo = memoizedIsInitialized;
  if (memo != -1) {
    return memo == 1;
  }
  // The only required field is id.
  if (!hasId()) {
    memoizedIsInitialized = 0;
    return false;
  }
  // An optional schema, when present, must itself be fully initialized.
  if (hasSchema() && !getSchema().isInitialized()) {
    memoizedIsInitialized = 0;
    return false;
  }
  memoizedIsInitialized = 1;
  return true;
}
// Serializes only the fields whose presence bit is set, in field-number order,
// then the unknown fields. getSerializedSize() is invoked first to populate
// any cached sizes the output stream relies on.
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getNameBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeMessage(2, schema_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt32(3, cellSize_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt32(4, id_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeEnum(5, state_.getNumber());
}
getUnknownFields().writeTo(output);
}
// Cached wire size; -1 means "not yet computed". Sums the encoded size of each
// set field (tag + payload) plus the unknown-field set.
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getNameBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, schema_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(3, cellSize_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(4, id_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(5, state_.getNumber());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
// Java serialization goes through the GeneratedMessage serialization proxy
// (writeReplace), so the class's own serial form never changes.
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  // Identity shortcut.
  if (obj == this) {
    return true;
  }
  // Non-ErasureCodingPolicyProto messages are delegated to the superclass.
  if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto)) {
    return super.equals(obj);
  }
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto that =
      (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto) obj;
  // Each field must agree on presence and, when present, on value.
  if (hasName() != that.hasName()) {
    return false;
  }
  if (hasName() && !getName().equals(that.getName())) {
    return false;
  }
  if (hasSchema() != that.hasSchema()) {
    return false;
  }
  if (hasSchema() && !getSchema().equals(that.getSchema())) {
    return false;
  }
  if (hasCellSize() != that.hasCellSize()) {
    return false;
  }
  if (hasCellSize() && getCellSize() != that.getCellSize()) {
    return false;
  }
  if (hasId() != that.hasId()) {
    return false;
  }
  if (hasId() && getId() != that.getId()) {
    return false;
  }
  if (hasState() != that.hasState()) {
    return false;
  }
  // Enum constants are singletons, so reference comparison is exact.
  if (hasState() && getState() != that.getState()) {
    return false;
  }
  return getUnknownFields().equals(that.getUnknownFields());
}
// Cached hash; 0 means "not yet computed" (a computed hash of exactly 0 is
// recomputed each call, which is harmless).
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
// Only set fields contribute, keyed by their field number, mirroring equals().
if (hasName()) {
hash = (37 * hash) + NAME_FIELD_NUMBER;
hash = (53 * hash) + getName().hashCode();
}
if (hasSchema()) {
hash = (37 * hash) + SCHEMA_FIELD_NUMBER;
hash = (53 * hash) + getSchema().hashCode();
}
if (hasCellSize()) {
hash = (37 * hash) + CELLSIZE_FIELD_NUMBER;
hash = (53 * hash) + getCellSize();
}
if (hasId()) {
hash = (37 * hash) + ID_FIELD_NUMBER;
hash = (53 * hash) + getId();
}
if (hasState()) {
hash = (37 * hash) + STATE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getState());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// --- Static parse factories ---------------------------------------------
// All overloads delegate to PARSER; byte[]/ByteString variants throw
// InvalidProtocolBufferException, stream variants throw IOException.
// The "Delimited" variants read a varint length prefix first.
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom(
    com.google.protobuf.ByteString data)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom(
    com.google.protobuf.ByteString data,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom(byte[] data)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom(
    byte[] data,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom(java.io.InputStream input)
    throws java.io.IOException {
  return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom(
    java.io.InputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseDelimitedFrom(java.io.InputStream input)
    throws java.io.IOException {
  return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseDelimitedFrom(
    java.io.InputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom(
    com.google.protobuf.CodedInputStream input)
    throws java.io.IOException {
  return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return PARSER.parseFrom(input, extensionRegistry);
}
// --- Builder factories --------------------------------------------------
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
// Returns a builder pre-populated with the given prototype's fields.
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto prototype) {
  return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
    com.google.protobuf.GeneratedMessage.BuilderParent parent) {
  Builder builder = new Builder(parent);
  return builder;
}
/**
 * Protobuf type {@code hadoop.hdfs.ErasureCodingPolicyProto}
 *
 * <p>Builder for {@code ErasureCodingPolicyProto}. NOTE(review): the type
 * argument on the {@code GeneratedMessage.Builder} superclass had been lost
 * (HTML-stripped when this generated file was copied); restored to
 * {@code <Builder>} to match protoc 2.5 output. No other token changed.
 */
public static final class Builder extends
    com.google.protobuf.GeneratedMessage.Builder<Builder>
   implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder {
  public static final com.google.protobuf.Descriptors.Descriptor
      getDescriptor() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ErasureCodingPolicyProto_descriptor;
  }
  protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ErasureCodingPolicyProto_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder.class);
  }
  // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.newBuilder()
  private Builder() {
    maybeForceBuilderInitialization();
  }
  private Builder(
      com.google.protobuf.GeneratedMessage.BuilderParent parent) {
    super(parent);
    maybeForceBuilderInitialization();
  }
  private void maybeForceBuilderInitialization() {
    if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
      getSchemaFieldBuilder();
    }
  }
  private static Builder create() {
    return new Builder();
  }
  public Builder clear() {
    super.clear();
    name_ = "";
    bitField0_ = (bitField0_ & ~0x00000001);
    if (schemaBuilder_ == null) {
      schema_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.getDefaultInstance();
    } else {
      schemaBuilder_.clear();
    }
    bitField0_ = (bitField0_ & ~0x00000002);
    cellSize_ = 0;
    bitField0_ = (bitField0_ & ~0x00000004);
    id_ = 0;
    bitField0_ = (bitField0_ & ~0x00000008);
    state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState.ENABLED;
    bitField0_ = (bitField0_ & ~0x00000010);
    return this;
  }
  public Builder clone() {
    return create().mergeFrom(buildPartial());
  }
  public com.google.protobuf.Descriptors.Descriptor
      getDescriptorForType() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ErasureCodingPolicyProto_descriptor;
  }
  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getDefaultInstanceForType() {
    return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance();
  }
  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto build() {
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto result = buildPartial();
    if (!result.isInitialized()) {
      throw newUninitializedMessageException(result);
    }
    return result;
  }
  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto buildPartial() {
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto(this);
    int from_bitField0_ = bitField0_;
    int to_bitField0_ = 0;
    if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
      to_bitField0_ |= 0x00000001;
    }
    result.name_ = name_;
    if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
      to_bitField0_ |= 0x00000002;
    }
    if (schemaBuilder_ == null) {
      result.schema_ = schema_;
    } else {
      result.schema_ = schemaBuilder_.build();
    }
    if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
      to_bitField0_ |= 0x00000004;
    }
    result.cellSize_ = cellSize_;
    if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
      to_bitField0_ |= 0x00000008;
    }
    result.id_ = id_;
    if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
      to_bitField0_ |= 0x00000010;
    }
    result.state_ = state_;
    result.bitField0_ = to_bitField0_;
    onBuilt();
    return result;
  }
  public Builder mergeFrom(com.google.protobuf.Message other) {
    if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto) {
      return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto)other);
    } else {
      super.mergeFrom(other);
      return this;
    }
  }
  public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto other) {
    if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance()) return this;
    if (other.hasName()) {
      bitField0_ |= 0x00000001;
      name_ = other.name_;
      onChanged();
    }
    if (other.hasSchema()) {
      mergeSchema(other.getSchema());
    }
    if (other.hasCellSize()) {
      setCellSize(other.getCellSize());
    }
    if (other.hasId()) {
      setId(other.getId());
    }
    if (other.hasState()) {
      setState(other.getState());
    }
    this.mergeUnknownFields(other.getUnknownFields());
    return this;
  }
  public final boolean isInitialized() {
    if (!hasId()) {
      return false;
    }
    if (hasSchema()) {
      if (!getSchema().isInitialized()) {
        return false;
      }
    }
    return true;
  }
  public Builder mergeFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parsedMessage = null;
    try {
      parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto) e.getUnfinishedMessage();
      throw e;
    } finally {
      if (parsedMessage != null) {
        mergeFrom(parsedMessage);
      }
    }
    return this;
  }
  private int bitField0_;
  // optional string name = 1;
  private java.lang.Object name_ = "";
  /**
   * optional string name = 1;
   */
  public boolean hasName() {
    return ((bitField0_ & 0x00000001) == 0x00000001);
  }
  /**
   * optional string name = 1;
   */
  public java.lang.String getName() {
    java.lang.Object ref = name_;
    if (!(ref instanceof java.lang.String)) {
      java.lang.String s = ((com.google.protobuf.ByteString) ref)
          .toStringUtf8();
      name_ = s;
      return s;
    } else {
      return (java.lang.String) ref;
    }
  }
  /**
   * optional string name = 1;
   */
  public com.google.protobuf.ByteString
      getNameBytes() {
    java.lang.Object ref = name_;
    if (ref instanceof String) {
      com.google.protobuf.ByteString b =
          com.google.protobuf.ByteString.copyFromUtf8(
              (java.lang.String) ref);
      name_ = b;
      return b;
    } else {
      return (com.google.protobuf.ByteString) ref;
    }
  }
  /**
   * optional string name = 1;
   */
  public Builder setName(
      java.lang.String value) {
    if (value == null) {
      throw new NullPointerException();
    }
    bitField0_ |= 0x00000001;
    name_ = value;
    onChanged();
    return this;
  }
  /**
   * optional string name = 1;
   */
  public Builder clearName() {
    bitField0_ = (bitField0_ & ~0x00000001);
    name_ = getDefaultInstance().getName();
    onChanged();
    return this;
  }
  /**
   * optional string name = 1;
   */
  public Builder setNameBytes(
      com.google.protobuf.ByteString value) {
    if (value == null) {
      throw new NullPointerException();
    }
    bitField0_ |= 0x00000001;
    name_ = value;
    onChanged();
    return this;
  }
  // optional .hadoop.hdfs.ECSchemaProto schema = 2;
  private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto schema_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.getDefaultInstance();
  private com.google.protobuf.SingleFieldBuilder<
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProtoOrBuilder> schemaBuilder_;
  /**
   * optional .hadoop.hdfs.ECSchemaProto schema = 2;
   */
  public boolean hasSchema() {
    return ((bitField0_ & 0x00000002) == 0x00000002);
  }
  /**
   * optional .hadoop.hdfs.ECSchemaProto schema = 2;
   */
  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto getSchema() {
    if (schemaBuilder_ == null) {
      return schema_;
    } else {
      return schemaBuilder_.getMessage();
    }
  }
  /**
   * optional .hadoop.hdfs.ECSchemaProto schema = 2;
   */
  public Builder setSchema(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto value) {
    if (schemaBuilder_ == null) {
      if (value == null) {
        throw new NullPointerException();
      }
      schema_ = value;
      onChanged();
    } else {
      schemaBuilder_.setMessage(value);
    }
    bitField0_ |= 0x00000002;
    return this;
  }
  /**
   * optional .hadoop.hdfs.ECSchemaProto schema = 2;
   */
  public Builder setSchema(
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder builderForValue) {
    if (schemaBuilder_ == null) {
      schema_ = builderForValue.build();
      onChanged();
    } else {
      schemaBuilder_.setMessage(builderForValue.build());
    }
    bitField0_ |= 0x00000002;
    return this;
  }
  /**
   * optional .hadoop.hdfs.ECSchemaProto schema = 2;
   */
  public Builder mergeSchema(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto value) {
    if (schemaBuilder_ == null) {
      if (((bitField0_ & 0x00000002) == 0x00000002) &&
          schema_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.getDefaultInstance()) {
        schema_ =
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.newBuilder(schema_).mergeFrom(value).buildPartial();
      } else {
        schema_ = value;
      }
      onChanged();
    } else {
      schemaBuilder_.mergeFrom(value);
    }
    bitField0_ |= 0x00000002;
    return this;
  }
  /**
   * optional .hadoop.hdfs.ECSchemaProto schema = 2;
   */
  public Builder clearSchema() {
    if (schemaBuilder_ == null) {
      schema_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.getDefaultInstance();
      onChanged();
    } else {
      schemaBuilder_.clear();
    }
    bitField0_ = (bitField0_ & ~0x00000002);
    return this;
  }
  /**
   * optional .hadoop.hdfs.ECSchemaProto schema = 2;
   */
  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder getSchemaBuilder() {
    bitField0_ |= 0x00000002;
    onChanged();
    return getSchemaFieldBuilder().getBuilder();
  }
  /**
   * optional .hadoop.hdfs.ECSchemaProto schema = 2;
   */
  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProtoOrBuilder getSchemaOrBuilder() {
    if (schemaBuilder_ != null) {
      return schemaBuilder_.getMessageOrBuilder();
    } else {
      return schema_;
    }
  }
  /**
   * optional .hadoop.hdfs.ECSchemaProto schema = 2;
   */
  private com.google.protobuf.SingleFieldBuilder<
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProtoOrBuilder>
      getSchemaFieldBuilder() {
    if (schemaBuilder_ == null) {
      schemaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProtoOrBuilder>(
              schema_,
              getParentForChildren(),
              isClean());
      schema_ = null;
    }
    return schemaBuilder_;
  }
  // optional uint32 cellSize = 3;
  private int cellSize_ ;
  /**
   * optional uint32 cellSize = 3;
   */
  public boolean hasCellSize() {
    return ((bitField0_ & 0x00000004) == 0x00000004);
  }
  /**
   * optional uint32 cellSize = 3;
   */
  public int getCellSize() {
    return cellSize_;
  }
  /**
   * optional uint32 cellSize = 3;
   */
  public Builder setCellSize(int value) {
    bitField0_ |= 0x00000004;
    cellSize_ = value;
    onChanged();
    return this;
  }
  /**
   * optional uint32 cellSize = 3;
   */
  public Builder clearCellSize() {
    bitField0_ = (bitField0_ & ~0x00000004);
    cellSize_ = 0;
    onChanged();
    return this;
  }
  // required uint32 id = 4;
  private int id_ ;
  /**
   * required uint32 id = 4;
   *
   *
   * Actually a byte - only 8 bits used
   *
   */
  public boolean hasId() {
    return ((bitField0_ & 0x00000008) == 0x00000008);
  }
  /**
   * required uint32 id = 4;
   *
   *
   * Actually a byte - only 8 bits used
   *
   */
  public int getId() {
    return id_;
  }
  /**
   * required uint32 id = 4;
   *
   *
   * Actually a byte - only 8 bits used
   *
   */
  public Builder setId(int value) {
    bitField0_ |= 0x00000008;
    id_ = value;
    onChanged();
    return this;
  }
  /**
   * required uint32 id = 4;
   *
   *
   * Actually a byte - only 8 bits used
   *
   */
  public Builder clearId() {
    bitField0_ = (bitField0_ & ~0x00000008);
    id_ = 0;
    onChanged();
    return this;
  }
  // optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED];
  private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState.ENABLED;
  /**
   * optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED];
   */
  public boolean hasState() {
    return ((bitField0_ & 0x00000010) == 0x00000010);
  }
  /**
   * optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED];
   */
  public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState getState() {
    return state_;
  }
  /**
   * optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED];
   */
  public Builder setState(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState value) {
    if (value == null) {
      throw new NullPointerException();
    }
    bitField0_ |= 0x00000010;
    state_ = value;
    onChanged();
    return this;
  }
  /**
   * optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED];
   */
  public Builder clearState() {
    bitField0_ = (bitField0_ & ~0x00000010);
    state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState.ENABLED;
    onChanged();
    return this;
  }
  // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ErasureCodingPolicyProto)
}
// Eagerly build the singleton default instance with all fields at their
// proto-declared defaults.
static {
  defaultInstance = new ErasureCodingPolicyProto(true);
  defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ErasureCodingPolicyProto)
}
/**
 * Read-only accessor interface implemented by both
 * {@code AddErasureCodingPolicyResponseProto} and its {@code Builder}.
 */
public interface AddErasureCodingPolicyResponseProtoOrBuilder
    extends com.google.protobuf.MessageOrBuilder {
  // required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;
  /**
   * <code>required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;</code>
   */
  boolean hasPolicy();
  /**
   * <code>required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;</code>
   */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getPolicy();
  /**
   * <code>required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;</code>
   */
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getPolicyOrBuilder();
  // required bool succeed = 2;
  /**
   * <code>required bool succeed = 2;</code>
   */
  boolean hasSucceed();
  /**
   * <code>required bool succeed = 2;</code>
   */
  boolean getSucceed();
  // optional string errorMsg = 3;
  /**
   * <code>optional string errorMsg = 3;</code>
   */
  boolean hasErrorMsg();
  /**
   * <code>optional string errorMsg = 3;</code>
   */
  java.lang.String getErrorMsg();
  /**
   * <code>optional string errorMsg = 3;</code>
   */
  com.google.protobuf.ByteString
      getErrorMsgBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.AddErasureCodingPolicyResponseProto}
*/
public static final class AddErasureCodingPolicyResponseProto extends
com.google.protobuf.GeneratedMessage
implements AddErasureCodingPolicyResponseProtoOrBuilder {
// Use AddErasureCodingPolicyResponseProto.newBuilder() to construct.
// NOTE(review): the wildcard type argument on the Builder parameter had been
// lost (HTML-stripped, leaving "Builder>"); restored to match protoc 2.5 output.
private AddErasureCodingPolicyResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
  super(builder);
  this.unknownFields = builder.getUnknownFields();
}
// Used only to build the default instance without touching descriptors.
private AddErasureCodingPolicyResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final AddErasureCodingPolicyResponseProto defaultInstance;
public static AddErasureCodingPolicyResponseProto getDefaultInstance() {
  return defaultInstance;
}
public AddErasureCodingPolicyResponseProto getDefaultInstanceForType() {
  return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
    getUnknownFields() {
  return this.unknownFields;
}
// Wire-format parsing constructor: reads tags until EOF (tag 0), dispatching
// on each field's tag; unrecognized tags go to the unknown-field set.
// (The "default" arm before "case 10" is the standard protoc 2.5 layout.)
private AddErasureCodingPolicyResponseProto(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  initFields();
  int mutable_bitField0_ = 0;
  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
      com.google.protobuf.UnknownFieldSet.newBuilder();
  try {
    boolean done = false;
    while (!done) {
      int tag = input.readTag();
      switch (tag) {
        case 0:
          done = true;
          break;
        default: {
          if (!parseUnknownField(input, unknownFields,
                                 extensionRegistry, tag)) {
            done = true;
          }
          break;
        }
        case 10: {
          // Field 1 (policy): if already seen, merge into the existing value
          // per protobuf last-message-merges semantics.
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder subBuilder = null;
          if (((bitField0_ & 0x00000001) == 0x00000001)) {
            subBuilder = policy_.toBuilder();
          }
          policy_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.PARSER, extensionRegistry);
          if (subBuilder != null) {
            subBuilder.mergeFrom(policy_);
            policy_ = subBuilder.buildPartial();
          }
          bitField0_ |= 0x00000001;
          break;
        }
        case 16: {
          bitField0_ |= 0x00000002;
          succeed_ = input.readBool();
          break;
        }
        case 26: {
          bitField0_ |= 0x00000004;
          errorMsg_ = input.readBytes();
          break;
        }
      }
    }
  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
    throw e.setUnfinishedMessage(this);
  } catch (java.io.IOException e) {
    throw new com.google.protobuf.InvalidProtocolBufferException(
        e.getMessage()).setUnfinishedMessage(this);
  } finally {
    this.unknownFields = unknownFields.build();
    makeExtensionsImmutable();
  }
}
public static final com.google.protobuf.Descriptors.Descriptor
    getDescriptor() {
  return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
    internalGetFieldAccessorTable() {
  return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_fieldAccessorTable
      .ensureFieldAccessorsInitialized(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto.Builder.class);
}
// Parser shared by all parseFrom() factories.
// NOTE(review): the <AddErasureCodingPolicyResponseProto> type arguments on
// Parser/AbstractParser had been lost (HTML-stripped); restored to match
// protoc 2.5 output. No behavioral change.
public static com.google.protobuf.Parser<AddErasureCodingPolicyResponseProto> PARSER =
    new com.google.protobuf.AbstractParser<AddErasureCodingPolicyResponseProto>() {
  public AddErasureCodingPolicyResponseProto parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return new AddErasureCodingPolicyResponseProto(input, extensionRegistry);
  }
};

@java.lang.Override
public com.google.protobuf.Parser<AddErasureCodingPolicyResponseProto> getParserForType() {
  return PARSER;
}
// Presence bits: 0x1 = policy, 0x2 = succeed, 0x4 = errorMsg.
private int bitField0_;
// required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;
public static final int POLICY_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto policy_;
/**
 * <code>required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;</code>
 */
public boolean hasPolicy() {
  return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
 * <code>required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;</code>
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getPolicy() {
  return policy_;
}
/**
 * <code>required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;</code>
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getPolicyOrBuilder() {
  return policy_;
}
// required bool succeed = 2;
public static final int SUCCEED_FIELD_NUMBER = 2;
private boolean succeed_;
/**
 * <code>required bool succeed = 2;</code>
 */
public boolean hasSucceed() {
  return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
 * <code>required bool succeed = 2;</code>
 */
public boolean getSucceed() {
  return succeed_;
}
// optional string errorMsg = 3;
public static final int ERRORMSG_FIELD_NUMBER = 3;
// Holds either a String or a ByteString; decoded lazily and cached when the
// bytes are valid UTF-8.
private java.lang.Object errorMsg_;
/**
 * <code>optional string errorMsg = 3;</code>
 */
public boolean hasErrorMsg() {
  return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
 * <code>optional string errorMsg = 3;</code>
 */
public java.lang.String getErrorMsg() {
  java.lang.Object ref = errorMsg_;
  if (ref instanceof java.lang.String) {
    return (java.lang.String) ref;
  } else {
    com.google.protobuf.ByteString bs =
        (com.google.protobuf.ByteString) ref;
    java.lang.String s = bs.toStringUtf8();
    if (bs.isValidUtf8()) {
      errorMsg_ = s;
    }
    return s;
  }
}
/**
 * <code>optional string errorMsg = 3;</code>
 */
public com.google.protobuf.ByteString
    getErrorMsgBytes() {
  java.lang.Object ref = errorMsg_;
  if (ref instanceof java.lang.String) {
    com.google.protobuf.ByteString b =
        com.google.protobuf.ByteString.copyFromUtf8(
            (java.lang.String) ref);
    errorMsg_ = b;
    return b;
  } else {
    return (com.google.protobuf.ByteString) ref;
  }
}
// Resets all fields to their proto defaults before parsing.
private void initFields() {
  policy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance();
  succeed_ = false;
  errorMsg_ = "";
}
// Cached initialization state: -1 unknown, 0 not initialized, 1 initialized.
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
  byte isInitialized = memoizedIsInitialized;
  if (isInitialized != -1) return isInitialized == 1;
  // Both required fields must be set, and the nested policy message must
  // itself be fully initialized.
  if (!hasPolicy()) {
    memoizedIsInitialized = 0;
    return false;
  }
  if (!hasSucceed()) {
    memoizedIsInitialized = 0;
    return false;
  }
  if (!getPolicy().isInitialized()) {
    memoizedIsInitialized = 0;
    return false;
  }
  memoizedIsInitialized = 1;
  return true;
}
// Serializes only the fields whose presence bit is set, in field order.
public void writeTo(com.google.protobuf.CodedOutputStream output)
                    throws java.io.IOException {
  getSerializedSize();
  if (((bitField0_ & 0x00000001) == 0x00000001)) {
    output.writeMessage(1, policy_);
  }
  if (((bitField0_ & 0x00000002) == 0x00000002)) {
    output.writeBool(2, succeed_);
  }
  if (((bitField0_ & 0x00000004) == 0x00000004)) {
    output.writeBytes(3, getErrorMsgBytes());
  }
  getUnknownFields().writeTo(output);
}
// Cached size of the serialized form; -1 means "not computed yet".
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
  int size = memoizedSerializedSize;
  if (size != -1) return size;
  size = 0;
  if (((bitField0_ & 0x00000001) == 0x00000001)) {
    size += com.google.protobuf.CodedOutputStream
      .computeMessageSize(1, policy_);
  }
  if (((bitField0_ & 0x00000002) == 0x00000002)) {
    size += com.google.protobuf.CodedOutputStream
      .computeBoolSize(2, succeed_);
  }
  if (((bitField0_ & 0x00000004) == 0x00000004)) {
    size += com.google.protobuf.CodedOutputStream
      .computeBytesSize(3, getErrorMsgBytes());
  }
  size += getUnknownFields().getSerializedSize();
  memoizedSerializedSize = size;
  return size;
}
private static final long serialVersionUID = 0L;
// Java serialization is delegated to GeneratedMessage's serialized proxy.
@java.lang.Override
protected java.lang.Object writeReplace()
    throws java.io.ObjectStreamException {
  return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto)) {
    return super.equals(obj);
  }
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto other =
      (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto) obj;
  // Messages are equal when every field agrees on presence and, when present,
  // on value; unknown fields must match as well.
  if (hasPolicy() != other.hasPolicy()
      || (hasPolicy() && !getPolicy().equals(other.getPolicy()))) {
    return false;
  }
  if (hasSucceed() != other.hasSucceed()
      || (hasSucceed() && getSucceed() != other.getSucceed())) {
    return false;
  }
  if (hasErrorMsg() != other.hasErrorMsg()
      || (hasErrorMsg() && !getErrorMsg().equals(other.getErrorMsg()))) {
    return false;
  }
  return getUnknownFields().equals(other.getUnknownFields());
}
// Cached hash; 0 means "not yet computed".
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  // Standard protobuf-generated scheme; must stay consistent with equals().
  int hash = 41;
  hash = (19 * hash) + getDescriptorForType().hashCode();
  if (hasPolicy()) {
    hash = (37 * hash) + POLICY_FIELD_NUMBER;
    hash = (53 * hash) + getPolicy().hashCode();
  }
  if (hasSucceed()) {
    hash = (37 * hash) + SUCCEED_FIELD_NUMBER;
    // hashBoolean is a GeneratedMessage helper (true=1231, false=1237).
    hash = (53 * hash) + hashBoolean(getSucceed());
  }
  if (hasErrorMsg()) {
    hash = (37 * hash) + ERRORMSG_FIELD_NUMBER;
    hash = (53 * hash) + getErrorMsg().hashCode();
  }
  hash = (29 * hash) + getUnknownFields().hashCode();
  memoizedHashCode = hash;
  return hash;
}
// --- Static parse factories ---------------------------------------------
// All overloads delegate to PARSER; the "Delimited" variants read a varint
// length prefix first.
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom(
    com.google.protobuf.ByteString data)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom(
    com.google.protobuf.ByteString data,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom(byte[] data)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom(
    byte[] data,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom(java.io.InputStream input)
    throws java.io.IOException {
  return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom(
    java.io.InputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseDelimitedFrom(java.io.InputStream input)
    throws java.io.IOException {
  return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseDelimitedFrom(
    java.io.InputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom(
    com.google.protobuf.CodedInputStream input)
    throws java.io.IOException {
  return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return PARSER.parseFrom(input, extensionRegistry);
}
// --- Builder factories --------------------------------------------------
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
// Returns a builder pre-populated with the given prototype's fields.
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto prototype) {
  return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
    com.google.protobuf.GeneratedMessage.BuilderParent parent) {
  Builder builder = new Builder(parent);
  return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.AddErasureCodingPolicyResponseProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
    getDescriptor() {
  return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
    internalGetFieldAccessorTable() {
  return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_fieldAccessorTable
      .ensureFieldAccessorsInitialized(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto.newBuilder()
private Builder() {
  maybeForceBuilderInitialization();
}
private Builder(
    com.google.protobuf.GeneratedMessage.BuilderParent parent) {
  super(parent);
  maybeForceBuilderInitialization();
}
// Pre-creates nested-message field builders when running with descriptors.
private void maybeForceBuilderInitialization() {
  if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
    getPolicyFieldBuilder();
  }
}
private static Builder create() {
  return new Builder();
}
// Resets every field to its default and clears all presence bits.
public Builder clear() {
  super.clear();
  if (policyBuilder_ == null) {
    policy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance();
  } else {
    policyBuilder_.clear();
  }
  bitField0_ = (bitField0_ & ~0x00000001);
  succeed_ = false;
  bitField0_ = (bitField0_ & ~0x00000002);
  errorMsg_ = "";
  bitField0_ = (bitField0_ & ~0x00000004);
  return this;
}
public Builder clone() {
  return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (policyBuilder_ == null) {
result.policy_ = policy_;
} else {
result.policy_ = policyBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.succeed_ = succeed_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.errorMsg_ = errorMsg_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
// Dynamic-dispatch merge: narrows to the typed overload when possible.
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
// Field-by-field merge; only fields set on `other` overwrite this builder.
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto.getDefaultInstance()) return this;
if (other.hasPolicy()) {
mergePolicy(other.getPolicy());
}
if (other.hasSucceed()) {
setSucceed(other.getSucceed());
}
if (other.hasErrorMsg()) {
// Copies the raw Object (String or ByteString) to avoid re-encoding.
bitField0_ |= 0x00000004;
errorMsg_ = other.errorMsg_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
// True when both required fields are set and the nested policy is itself valid.
public final boolean isInitialized() {
if (!hasPolicy()) {
return false;
}
if (!hasSucceed()) {
return false;
}
if (!getPolicy().isInitialized()) {
return false;
}
return true;
}
// Stream merge; on parse failure the partially-read message is still merged
// (finally block) before the exception propagates.
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// Has-bits for this builder: 0x1 = policy, 0x2 = succeed, 0x4 = errorMsg.
private int bitField0_;
// required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;
// Either policy_ (plain message) or policyBuilder_ (nested builder) is live;
// once getPolicyFieldBuilder() runs, policyBuilder_ takes over and policy_ is nulled.
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto policy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> policyBuilder_;
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;
*/
public boolean hasPolicy() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getPolicy() {
if (policyBuilder_ == null) {
return policy_;
} else {
return policyBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;
*/
public Builder setPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) {
if (policyBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
policy_ = value;
onChanged();
} else {
policyBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;
*/
public Builder setPolicy(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder builderForValue) {
if (policyBuilder_ == null) {
policy_ = builderForValue.build();
onChanged();
} else {
policyBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;
*/
public Builder mergePolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) {
if (policyBuilder_ == null) {
// Merge only when a non-default policy is already present; otherwise replace.
if (((bitField0_ & 0x00000001) == 0x00000001) &&
policy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance()) {
policy_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.newBuilder(policy_).mergeFrom(value).buildPartial();
} else {
policy_ = value;
}
onChanged();
} else {
policyBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;
*/
public Builder clearPolicy() {
if (policyBuilder_ == null) {
policy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance();
onChanged();
} else {
policyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder getPolicyBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getPolicyFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getPolicyOrBuilder() {
if (policyBuilder_ != null) {
return policyBuilder_.getMessageOrBuilder();
} else {
return policy_;
}
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder>
getPolicyFieldBuilder() {
if (policyBuilder_ == null) {
policyBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder>(
policy_,
getParentForChildren(),
isClean());
policy_ = null;
}
return policyBuilder_;
}
// required bool succeed = 2;
// Whether the add-EC-policy request succeeded; has-bit 0x2.
private boolean succeed_ ;
/**
* required bool succeed = 2;
*/
public boolean hasSucceed() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required bool succeed = 2;
*/
public boolean getSucceed() {
return succeed_;
}
/**
* required bool succeed = 2;
*/
public Builder setSucceed(boolean value) {
bitField0_ |= 0x00000002;
succeed_ = value;
onChanged();
return this;
}
/**
* required bool succeed = 2;
*/
public Builder clearSucceed() {
bitField0_ = (bitField0_ & ~0x00000002);
succeed_ = false;
onChanged();
return this;
}
// optional string errorMsg = 3;
// Lazily decoded: holds either a java.lang.String or a ByteString; the UTF-8
// decode result is cached back into the field on first String access.
private java.lang.Object errorMsg_ = "";
/**
* optional string errorMsg = 3;
*/
public boolean hasErrorMsg() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional string errorMsg = 3;
*/
public java.lang.String getErrorMsg() {
java.lang.Object ref = errorMsg_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
errorMsg_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string errorMsg = 3;
*/
public com.google.protobuf.ByteString
getErrorMsgBytes() {
java.lang.Object ref = errorMsg_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
errorMsg_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* optional string errorMsg = 3;
*/
public Builder setErrorMsg(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
errorMsg_ = value;
onChanged();
return this;
}
/**
* optional string errorMsg = 3;
*/
public Builder clearErrorMsg() {
bitField0_ = (bitField0_ & ~0x00000004);
errorMsg_ = getDefaultInstance().getErrorMsg();
onChanged();
return this;
}
/**
* optional string errorMsg = 3;
*/
public Builder setErrorMsgBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
errorMsg_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.AddErasureCodingPolicyResponseProto)
}
// Creates the singleton default instance at class-load time.
static {
defaultInstance = new AddErasureCodingPolicyResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.AddErasureCodingPolicyResponseProto)
}
// Read-only accessor view over HdfsPathHandleProto, implemented by both the
// message and its Builder (standard protoc-generated *OrBuilder interface).
public interface HdfsPathHandleProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// optional uint64 inodeId = 1;
/**
* optional uint64 inodeId = 1;
*/
boolean hasInodeId();
/**
* optional uint64 inodeId = 1;
*/
long getInodeId();
// optional uint64 mtime = 2;
/**
* optional uint64 mtime = 2;
*/
boolean hasMtime();
/**
* optional uint64 mtime = 2;
*/
long getMtime();
// optional string path = 3;
/**
* optional string path = 3;
*/
boolean hasPath();
/**
* optional string path = 3;
*/
java.lang.String getPath();
/**
* optional string path = 3;
*/
com.google.protobuf.ByteString
getPathBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.HdfsPathHandleProto}
*
*
**
* Placeholder type for consistent HDFS operations.
*
*/
public static final class HdfsPathHandleProto extends
com.google.protobuf.GeneratedMessage
implements HdfsPathHandleProtoOrBuilder {
// Use HdfsPathHandleProto.newBuilder() to construct.
private HdfsPathHandleProto(com.google.protobuf.GeneratedMessage.Builder> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private HdfsPathHandleProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final HdfsPathHandleProto defaultInstance;
public static HdfsPathHandleProto getDefaultInstance() {
return defaultInstance;
}
public HdfsPathHandleProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private HdfsPathHandleProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
inodeId_ = input.readUInt64();
break;
}
case 16: {
bitField0_ |= 0x00000002;
mtime_ = input.readUInt64();
break;
}
case 26: {
bitField0_ |= 0x00000004;
path_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsPathHandleProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsPathHandleProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto.Builder.class);
}
public static com.google.protobuf.Parser PARSER =
new com.google.protobuf.AbstractParser() {
public HdfsPathHandleProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new HdfsPathHandleProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser getParserForType() {
return PARSER;
}
private int bitField0_;
// optional uint64 inodeId = 1;
public static final int INODEID_FIELD_NUMBER = 1;
private long inodeId_;
/**
* optional uint64 inodeId = 1;
*/
public boolean hasInodeId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* optional uint64 inodeId = 1;
*/
public long getInodeId() {
return inodeId_;
}
// optional uint64 mtime = 2;
public static final int MTIME_FIELD_NUMBER = 2;
private long mtime_;
/**
* optional uint64 mtime = 2;
*/
public boolean hasMtime() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional uint64 mtime = 2;
*/
public long getMtime() {
return mtime_;
}
// optional string path = 3;
public static final int PATH_FIELD_NUMBER = 3;
private java.lang.Object path_;
/**
* optional string path = 3;
*/
public boolean hasPath() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional string path = 3;
*/
public java.lang.String getPath() {
java.lang.Object ref = path_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
path_ = s;
}
return s;
}
}
/**
* optional string path = 3;
*/
public com.google.protobuf.ByteString
getPathBytes() {
java.lang.Object ref = path_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
path_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
inodeId_ = 0L;
mtime_ = 0L;
path_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt64(1, inodeId_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, mtime_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, getPathBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(1, inodeId_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, mtime_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, getPathBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto) obj;
boolean result = true;
result = result && (hasInodeId() == other.hasInodeId());
if (hasInodeId()) {
result = result && (getInodeId()
== other.getInodeId());
}
result = result && (hasMtime() == other.hasMtime());
if (hasMtime()) {
result = result && (getMtime()
== other.getMtime());
}
result = result && (hasPath() == other.hasPath());
if (hasPath()) {
result = result && getPath()
.equals(other.getPath());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasInodeId()) {
hash = (37 * hash) + INODEID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getInodeId());
}
if (hasMtime()) {
hash = (37 * hash) + MTIME_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getMtime());
}
if (hasPath()) {
hash = (37 * hash) + PATH_FIELD_NUMBER;
hash = (53 * hash) + getPath().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.HdfsPathHandleProto}
*
*
**
* Placeholder type for consistent HDFS operations.
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsPathHandleProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsPathHandleProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
inodeId_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
mtime_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
path_ = "";
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsPathHandleProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.inodeId_ = inodeId_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.mtime_ = mtime_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.path_ = path_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto.getDefaultInstance()) return this;
if (other.hasInodeId()) {
setInodeId(other.getInodeId());
}
if (other.hasMtime()) {
setMtime(other.getMtime());
}
if (other.hasPath()) {
bitField0_ |= 0x00000004;
path_ = other.path_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// optional uint64 inodeId = 1;
private long inodeId_ ;
/**
* optional uint64 inodeId = 1;
*/
public boolean hasInodeId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* optional uint64 inodeId = 1;
*/
public long getInodeId() {
return inodeId_;
}
/**
* optional uint64 inodeId = 1;
*/
public Builder setInodeId(long value) {
bitField0_ |= 0x00000001;
inodeId_ = value;
onChanged();
return this;
}
/**
* optional uint64 inodeId = 1;
*/
public Builder clearInodeId() {
bitField0_ = (bitField0_ & ~0x00000001);
inodeId_ = 0L;
onChanged();
return this;
}
// optional uint64 mtime = 2;
private long mtime_ ;
/**
* optional uint64 mtime = 2;
*/
public boolean hasMtime() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional uint64 mtime = 2;
*/
public long getMtime() {
return mtime_;
}
/**
* optional uint64 mtime = 2;
*/
public Builder setMtime(long value) {
bitField0_ |= 0x00000002;
mtime_ = value;
onChanged();
return this;
}
/**
* optional uint64 mtime = 2;
*/
public Builder clearMtime() {
bitField0_ = (bitField0_ & ~0x00000002);
mtime_ = 0L;
onChanged();
return this;
}
// optional string path = 3;
private java.lang.Object path_ = "";
/**
* optional string path = 3;
*/
public boolean hasPath() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional string path = 3;
*/
public java.lang.String getPath() {
java.lang.Object ref = path_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
path_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string path = 3;
*/
public com.google.protobuf.ByteString
getPathBytes() {
java.lang.Object ref = path_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
path_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* optional string path = 3;
*/
public Builder setPath(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
path_ = value;
onChanged();
return this;
}
/**
* optional string path = 3;
*/
public Builder clearPath() {
bitField0_ = (bitField0_ & ~0x00000004);
path_ = getDefaultInstance().getPath();
onChanged();
return this;
}
/**
* optional string path = 3;
*/
public Builder setPathBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
path_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.HdfsPathHandleProto)
}
static {
defaultInstance = new HdfsPathHandleProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.HdfsPathHandleProto)
}
// Read-only accessor view over HdfsFileStatusProto (status of a file,
// directory or symlink), implemented by both the message and its Builder.
public interface HdfsFileStatusProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;
/**
* required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;
*/
boolean hasFileType();
/**
* required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType getFileType();
// required bytes path = 2;
/**
* required bytes path = 2;
*
*
* local name of inode encoded java UTF8
*
*/
boolean hasPath();
/**
* required bytes path = 2;
*
*
* local name of inode encoded java UTF8
*
*/
com.google.protobuf.ByteString getPath();
// required uint64 length = 3;
/**
* required uint64 length = 3;
*/
boolean hasLength();
/**
* required uint64 length = 3;
*/
long getLength();
// required .hadoop.hdfs.FsPermissionProto permission = 4;
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
boolean hasPermission();
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPermission();
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder();
// required string owner = 5;
/**
* required string owner = 5;
*/
boolean hasOwner();
/**
* required string owner = 5;
*/
java.lang.String getOwner();
/**
* required string owner = 5;
*/
com.google.protobuf.ByteString
getOwnerBytes();
// required string group = 6;
/**
* required string group = 6;
*/
boolean hasGroup();
/**
* required string group = 6;
*/
java.lang.String getGroup();
/**
* required string group = 6;
*/
com.google.protobuf.ByteString
getGroupBytes();
// required uint64 modification_time = 7;
/**
* required uint64 modification_time = 7;
*/
boolean hasModificationTime();
/**
* required uint64 modification_time = 7;
*/
long getModificationTime();
// required uint64 access_time = 8;
/**
* required uint64 access_time = 8;
*/
boolean hasAccessTime();
/**
* required uint64 access_time = 8;
*/
long getAccessTime();
// optional bytes symlink = 9;
/**
* optional bytes symlink = 9;
*
*
* Optional fields for symlink
*
*/
boolean hasSymlink();
/**
* optional bytes symlink = 9;
*
*
* Optional fields for symlink
*
*/
com.google.protobuf.ByteString getSymlink();
// optional uint32 block_replication = 10 [default = 0];
/**
* optional uint32 block_replication = 10 [default = 0];
*
*
* Optional fields for file
*
*/
boolean hasBlockReplication();
/**
* optional uint32 block_replication = 10 [default = 0];
*
*
* Optional fields for file
*
*/
int getBlockReplication();
// optional uint64 blocksize = 11 [default = 0];
/**
* optional uint64 blocksize = 11 [default = 0];
*/
boolean hasBlocksize();
/**
* optional uint64 blocksize = 11 [default = 0];
*/
long getBlocksize();
// optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* suppled only if asked by client
*
*/
boolean hasLocations();
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* suppled only if asked by client
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations();
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* suppled only if asked by client
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder();
// optional uint64 fileId = 13 [default = 0];
/**
* optional uint64 fileId = 13 [default = 0];
*
*
* Optional field for fileId
*
*/
boolean hasFileId();
/**
* optional uint64 fileId = 13 [default = 0];
*
*
* Optional field for fileId
*
*/
long getFileId();
// optional int32 childrenNum = 14 [default = -1];
/**
* optional int32 childrenNum = 14 [default = -1];
*/
boolean hasChildrenNum();
/**
* optional int32 childrenNum = 14 [default = -1];
*/
int getChildrenNum();
// optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
*
*
* Optional field for file encryption
*
*/
boolean hasFileEncryptionInfo();
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
*
*
* Optional field for file encryption
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo();
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
*
*
* Optional field for file encryption
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder();
// optional uint32 storagePolicy = 16 [default = 0];
/**
* optional uint32 storagePolicy = 16 [default = 0];
*
*
* block storage policy id
*
*/
boolean hasStoragePolicy();
/**
* optional uint32 storagePolicy = 16 [default = 0];
*
*
* block storage policy id
*
*/
int getStoragePolicy();
// optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;
*
*
* Optional field for erasure coding
*
*/
boolean hasEcPolicy();
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;
*
*
* Optional field for erasure coding
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy();
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;
*
*
* Optional field for erasure coding
*
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder();
// optional uint32 flags = 18 [default = 0];
/**
* optional uint32 flags = 18 [default = 0];
*
*
* Set of flags
*
*/
boolean hasFlags();
/**
* optional uint32 flags = 18 [default = 0];
*
*
* Set of flags
*
*/
int getFlags();
}
/**
* Protobuf type {@code hadoop.hdfs.HdfsFileStatusProto}
*
*
**
* Status of a file, directory or symlink
* Optionally includes a file's block locations if requested by client on the rpc call.
*
*/
public static final class HdfsFileStatusProto extends
com.google.protobuf.GeneratedMessage
implements HdfsFileStatusProtoOrBuilder {
// Use HdfsFileStatusProto.newBuilder() to construct.
// NOTE(review): generic type parameters appear stripped by the HTML viewer
// this file was extracted through — this is presumably
// GeneratedMessage.Builder<?> in the real generated source; confirm against
// the regenerated file before compiling.
private HdfsFileStatusProto(com.google.protobuf.GeneratedMessage.Builder> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
// Constructor used only to build the singleton default instance; it carries
// an empty unknown-field set and default field values.
private HdfsFileStatusProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final HdfsFileStatusProto defaultInstance;
public static HdfsFileStatusProto getDefaultInstance() {
return defaultInstance;
}
public HdfsFileStatusProto getDefaultInstanceForType() {
return defaultInstance;
}
// Fields that arrived on the wire with tags this (older) schema does not
// recognize; preserved so they survive a parse/serialize round trip.
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
// Parses one HdfsFileStatusProto message from the wire format.
// Each case label is a full protobuf tag: (field_number << 3) | wire_type.
// E.g. tag 34 = field 4 (permission), wire type 2 (length-delimited);
// tag 8 = field 1 (fileType), wire type 0 (varint).  Unrecognized tags are
// routed to unknownFields rather than rejected, which is what keeps the
// format forward-compatible.  Presence of each field is recorded in
// bitField0_ using the same mask constants as the hasX() accessors.
private HdfsFileStatusProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
// Tag 0 means end of the input stream (or enclosing message).
done = true;
break;
default: {
// Unknown field number: preserve it.  parseUnknownField returns
// false on an end-group tag, which also terminates the loop.
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
// Field 1: fileType (enum).  An enum number this schema does not
// know is kept as a raw varint in unknownFields instead of
// being silently dropped.
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
fileType_ = value;
}
break;
}
case 18: {
// Field 2: path (bytes).
bitField0_ |= 0x00000002;
path_ = input.readBytes();
break;
}
case 24: {
// Field 3: length (uint64).
bitField0_ |= 0x00000004;
length_ = input.readUInt64();
break;
}
case 34: {
// Field 4: permission (message).  If the field was already seen,
// the new payload is merged into the previous value, per protobuf
// semantics for repeated occurrences of a singular message field.
org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000008) == 0x00000008)) {
subBuilder = permission_.toBuilder();
}
permission_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(permission_);
permission_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000008;
break;
}
case 42: {
// Field 5: owner (string, stored as lazily-decoded bytes).
bitField0_ |= 0x00000010;
owner_ = input.readBytes();
break;
}
case 50: {
// Field 6: group (string, stored as lazily-decoded bytes).
bitField0_ |= 0x00000020;
group_ = input.readBytes();
break;
}
case 56: {
// Field 7: modification_time (uint64).
bitField0_ |= 0x00000040;
modificationTime_ = input.readUInt64();
break;
}
case 64: {
// Field 8: access_time (uint64).
bitField0_ |= 0x00000080;
accessTime_ = input.readUInt64();
break;
}
case 74: {
// Field 9: symlink (bytes).
bitField0_ |= 0x00000100;
symlink_ = input.readBytes();
break;
}
case 80: {
// Field 10: block_replication (uint32).
bitField0_ |= 0x00000200;
blockReplication_ = input.readUInt32();
break;
}
case 88: {
// Field 11: blocksize (uint64).
bitField0_ |= 0x00000400;
blocksize_ = input.readUInt64();
break;
}
case 98: {
// Field 12: locations (message); merged on repeat like permission.
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000800) == 0x00000800)) {
subBuilder = locations_.toBuilder();
}
locations_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(locations_);
locations_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000800;
break;
}
case 104: {
// Field 13: fileId (uint64).
bitField0_ |= 0x00001000;
fileId_ = input.readUInt64();
break;
}
case 112: {
// Field 14: childrenNum (int32).
bitField0_ |= 0x00002000;
childrenNum_ = input.readInt32();
break;
}
case 122: {
// Field 15: fileEncryptionInfo (message); merged on repeat.
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00004000) == 0x00004000)) {
subBuilder = fileEncryptionInfo_.toBuilder();
}
fileEncryptionInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(fileEncryptionInfo_);
fileEncryptionInfo_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00004000;
break;
}
case 128: {
// Field 16: storagePolicy (uint32).
bitField0_ |= 0x00008000;
storagePolicy_ = input.readUInt32();
break;
}
case 138: {
// Field 17: ecPolicy (message); merged on repeat.
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder subBuilder = null;
if (((bitField0_ & 0x00010000) == 0x00010000)) {
subBuilder = ecPolicy_.toBuilder();
}
ecPolicy_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(ecPolicy_);
ecPolicy_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00010000;
break;
}
case 144: {
// Field 18: flags (uint32 bit set; see the Flags enum).
bitField0_ |= 0x00020000;
flags_ = input.readUInt32();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
// Attach the partially-parsed message so callers can still inspect it.
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
// Freeze the unknown-field set even on failure paths.
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
// Returns the reflection descriptor for this message type, resolved from
// the file-level descriptor held by the outer HdfsProtos class.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor;
}
// Wires the descriptor to this class's fields so that the generic
// GeneratedMessage reflection API (getField/setField) works.
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder.class);
}
// Stateless parser delegating to the parsing constructor above.
// NOTE(review): PARSER is a non-final public static field — that is how
// protoc 2.5 generated it; callers must never reassign it.  The generic
// parameter (presumably Parser<HdfsFileStatusProto>) appears stripped by
// the HTML viewer this file was extracted through — confirm against the
// regenerated source.
public static com.google.protobuf.Parser PARSER =
new com.google.protobuf.AbstractParser() {
public HdfsFileStatusProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new HdfsFileStatusProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser getParserForType() {
return PARSER;
}
/**
* Protobuf enum {@code hadoop.hdfs.HdfsFileStatusProto.FileType}
*
* Kind of filesystem object this status describes.  Each constant carries
* (index, value): index is its position in the descriptor, value is the
* number serialized on the wire.
*/
public enum FileType
implements com.google.protobuf.ProtocolMessageEnum {
/**
* IS_DIR = 1;
*/
IS_DIR(0, 1),
/**
* IS_FILE = 2;
*/
IS_FILE(1, 2),
/**
* IS_SYMLINK = 3;
*/
IS_SYMLINK(2, 3),
;
/**
* IS_DIR = 1;
*/
public static final int IS_DIR_VALUE = 1;
/**
* IS_FILE = 2;
*/
public static final int IS_FILE_VALUE = 2;
/**
* IS_SYMLINK = 3;
*/
public static final int IS_SYMLINK_VALUE = 3;
public final int getNumber() { return value; }
// Maps a wire number to its constant; returns null (not an exception) for
// unknown numbers so parsers can divert them to unknown fields.
public static FileType valueOf(int value) {
switch (value) {
case 1: return IS_DIR;
case 2: return IS_FILE;
case 3: return IS_SYMLINK;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap() {
public FileType findValueByNumber(int number) {
return FileType.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
// FileType is the first (index 0) nested enum of HdfsFileStatusProto.
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDescriptor().getEnumTypes().get(0);
}
private static final FileType[] VALUES = values();
public static FileType valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
// Descriptor index of this constant (not the wire number).
private final int index;
// Number serialized on the wire.
private final int value;
private FileType(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.HdfsFileStatusProto.FileType)
}
/**
* Protobuf enum {@code hadoop.hdfs.HdfsFileStatusProto.Flags}
*
* Values are distinct powers of two (1, 2, 4, 8) so they can be OR'd
* together into the uint32 {@code flags} field (field 18) as a bit set.
*/
public enum Flags
implements com.google.protobuf.ProtocolMessageEnum {
/**
* HAS_ACL = 1;
*
*
* has ACLs
*
*/
HAS_ACL(0, 1),
/**
* HAS_CRYPT = 2;
*
*
* encrypted
*
*/
HAS_CRYPT(1, 2),
/**
* HAS_EC = 4;
*
*
* erasure coded
*
*/
HAS_EC(2, 4),
/**
* SNAPSHOT_ENABLED = 8;
*
*
* SNAPSHOT ENABLED
*
*/
SNAPSHOT_ENABLED(3, 8),
;
/**
* HAS_ACL = 1;
*
*
* has ACLs
*
*/
public static final int HAS_ACL_VALUE = 1;
/**
* HAS_CRYPT = 2;
*
*
* encrypted
*
*/
public static final int HAS_CRYPT_VALUE = 2;
/**
* HAS_EC = 4;
*
*
* erasure coded
*
*/
public static final int HAS_EC_VALUE = 4;
/**
* SNAPSHOT_ENABLED = 8;
*
*
* SNAPSHOT ENABLED
*
*/
public static final int SNAPSHOT_ENABLED_VALUE = 8;
public final int getNumber() { return value; }
// Maps a single bit value to its constant; returns null for unknown bits.
public static Flags valueOf(int value) {
switch (value) {
case 1: return HAS_ACL;
case 2: return HAS_CRYPT;
case 4: return HAS_EC;
case 8: return SNAPSHOT_ENABLED;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap() {
public Flags findValueByNumber(int number) {
return Flags.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
// Flags is the second (index 1) nested enum of HdfsFileStatusProto.
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDescriptor().getEnumTypes().get(1);
}
private static final Flags[] VALUES = values();
public static Flags valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
// Descriptor index of this constant (not the wire number).
private final int index;
// Number serialized on the wire (a single bit).
private final int value;
private Flags(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.HdfsFileStatusProto.Flags)
}
// ---- Field storage and accessors (generated) ----
// Presence of each field is tracked as one bit of bitField0_; the mask
// constants (0x1, 0x2, 0x4, ...) follow field-declaration order and match
// the masks used by the parsing constructor and writeTo().
private int bitField0_;
// required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;
public static final int FILETYPE_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType fileType_;
/**
* required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;
*/
public boolean hasFileType() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType getFileType() {
return fileType_;
}
// required bytes path = 2;
public static final int PATH_FIELD_NUMBER = 2;
private com.google.protobuf.ByteString path_;
/**
* required bytes path = 2;
*
*
* local name of inode encoded java UTF8
*
*/
public boolean hasPath() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required bytes path = 2;
*
*
* local name of inode encoded java UTF8
*
*/
public com.google.protobuf.ByteString getPath() {
return path_;
}
// required uint64 length = 3;
public static final int LENGTH_FIELD_NUMBER = 3;
private long length_;
/**
* required uint64 length = 3;
*/
public boolean hasLength() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 length = 3;
*/
public long getLength() {
return length_;
}
// required .hadoop.hdfs.FsPermissionProto permission = 4;
public static final int PERMISSION_FIELD_NUMBER = 4;
private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto permission_;
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
public boolean hasPermission() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPermission() {
return permission_;
}
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() {
return permission_;
}
// required string owner = 5;
public static final int OWNER_FIELD_NUMBER = 5;
// Holds either a String or a ByteString: the wire bytes are decoded to a
// String lazily on first access (see getOwner()).
private java.lang.Object owner_;
/**
* required string owner = 5;
*/
public boolean hasOwner() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required string owner = 5;
*/
public java.lang.String getOwner() {
java.lang.Object ref = owner_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
// Cache the decoded form only when the bytes are valid UTF-8, so a
// later serialization of malformed input reproduces the original bytes.
if (bs.isValidUtf8()) {
owner_ = s;
}
return s;
}
}
/**
* required string owner = 5;
*/
public com.google.protobuf.ByteString
getOwnerBytes() {
java.lang.Object ref = owner_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
owner_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required string group = 6;
public static final int GROUP_FIELD_NUMBER = 6;
// Same lazy String/ByteString dual representation as owner_.
private java.lang.Object group_;
/**
* required string group = 6;
*/
public boolean hasGroup() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* required string group = 6;
*/
public java.lang.String getGroup() {
java.lang.Object ref = group_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
group_ = s;
}
return s;
}
}
/**
* required string group = 6;
*/
public com.google.protobuf.ByteString
getGroupBytes() {
java.lang.Object ref = group_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
group_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required uint64 modification_time = 7;
public static final int MODIFICATION_TIME_FIELD_NUMBER = 7;
private long modificationTime_;
/**
* required uint64 modification_time = 7;
*/
public boolean hasModificationTime() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* required uint64 modification_time = 7;
*/
public long getModificationTime() {
return modificationTime_;
}
// required uint64 access_time = 8;
public static final int ACCESS_TIME_FIELD_NUMBER = 8;
private long accessTime_;
/**
* required uint64 access_time = 8;
*/
public boolean hasAccessTime() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* required uint64 access_time = 8;
*/
public long getAccessTime() {
return accessTime_;
}
// optional bytes symlink = 9;
public static final int SYMLINK_FIELD_NUMBER = 9;
private com.google.protobuf.ByteString symlink_;
/**
* optional bytes symlink = 9;
*
*
* Optional fields for symlink
*
*/
public boolean hasSymlink() {
return ((bitField0_ & 0x00000100) == 0x00000100);
}
/**
* optional bytes symlink = 9;
*
*
* Optional fields for symlink
*
*/
public com.google.protobuf.ByteString getSymlink() {
return symlink_;
}
// ---- Fields 10-18 (generated accessors, continued) ----
// optional uint32 block_replication = 10 [default = 0];
public static final int BLOCK_REPLICATION_FIELD_NUMBER = 10;
private int blockReplication_;
/**
* optional uint32 block_replication = 10 [default = 0];
*
*
* Optional fields for file
*
*/
public boolean hasBlockReplication() {
return ((bitField0_ & 0x00000200) == 0x00000200);
}
/**
* optional uint32 block_replication = 10 [default = 0];
*
*
* Optional fields for file
*
*/
public int getBlockReplication() {
return blockReplication_;
}
// optional uint64 blocksize = 11 [default = 0];
public static final int BLOCKSIZE_FIELD_NUMBER = 11;
private long blocksize_;
/**
* optional uint64 blocksize = 11 [default = 0];
*/
public boolean hasBlocksize() {
return ((bitField0_ & 0x00000400) == 0x00000400);
}
/**
* optional uint64 blocksize = 11 [default = 0];
*/
public long getBlocksize() {
return blocksize_;
}
// optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
public static final int LOCATIONS_FIELD_NUMBER = 12;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto locations_;
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* suppled only if asked by client
*
*/
public boolean hasLocations() {
return ((bitField0_ & 0x00000800) == 0x00000800);
}
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* suppled only if asked by client
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations() {
return locations_;
}
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* suppled only if asked by client
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder() {
return locations_;
}
// optional uint64 fileId = 13 [default = 0];
public static final int FILEID_FIELD_NUMBER = 13;
private long fileId_;
/**
* optional uint64 fileId = 13 [default = 0];
*
*
* Optional field for fileId
*
*/
public boolean hasFileId() {
return ((bitField0_ & 0x00001000) == 0x00001000);
}
/**
* optional uint64 fileId = 13 [default = 0];
*
*
* Optional field for fileId
*
*/
public long getFileId() {
return fileId_;
}
// optional int32 childrenNum = 14 [default = -1];
public static final int CHILDRENNUM_FIELD_NUMBER = 14;
private int childrenNum_;
/**
* optional int32 childrenNum = 14 [default = -1];
*/
public boolean hasChildrenNum() {
return ((bitField0_ & 0x00002000) == 0x00002000);
}
/**
* optional int32 childrenNum = 14 [default = -1];
*/
public int getChildrenNum() {
return childrenNum_;
}
// optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
public static final int FILEENCRYPTIONINFO_FIELD_NUMBER = 15;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto fileEncryptionInfo_;
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
*
*
* Optional field for file encryption
*
*/
public boolean hasFileEncryptionInfo() {
return ((bitField0_ & 0x00004000) == 0x00004000);
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
*
*
* Optional field for file encryption
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo() {
return fileEncryptionInfo_;
}
/**
* optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
*
*
* Optional field for file encryption
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder() {
return fileEncryptionInfo_;
}
// optional uint32 storagePolicy = 16 [default = 0];
public static final int STORAGEPOLICY_FIELD_NUMBER = 16;
private int storagePolicy_;
/**
* optional uint32 storagePolicy = 16 [default = 0];
*
*
* block storage policy id
*
*/
public boolean hasStoragePolicy() {
return ((bitField0_ & 0x00008000) == 0x00008000);
}
/**
* optional uint32 storagePolicy = 16 [default = 0];
*
*
* block storage policy id
*
*/
public int getStoragePolicy() {
return storagePolicy_;
}
// optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;
public static final int ECPOLICY_FIELD_NUMBER = 17;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto ecPolicy_;
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;
*
*
* Optional field for erasure coding
*
*/
public boolean hasEcPolicy() {
return ((bitField0_ & 0x00010000) == 0x00010000);
}
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;
*
*
* Optional field for erasure coding
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy() {
return ecPolicy_;
}
/**
* optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;
*
*
* Optional field for erasure coding
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder() {
return ecPolicy_;
}
// optional uint32 flags = 18 [default = 0];
public static final int FLAGS_FIELD_NUMBER = 18;
// Bit set built from HdfsFileStatusProto.Flags values OR'd together.
private int flags_;
/**
* optional uint32 flags = 18 [default = 0];
*
*
* Set of flags
*
*/
public boolean hasFlags() {
return ((bitField0_ & 0x00020000) == 0x00020000);
}
/**
* optional uint32 flags = 18 [default = 0];
*
*
* Set of flags
*
*/
public int getFlags() {
return flags_;
}
// Resets every field to its .proto-declared default (note childrenNum
// defaults to -1, not 0).  Called by the parsing constructor before any
// wire data is read, so absent fields read back as their defaults.
private void initFields() {
fileType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.IS_DIR;
path_ = com.google.protobuf.ByteString.EMPTY;
length_ = 0L;
permission_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance();
owner_ = "";
group_ = "";
modificationTime_ = 0L;
accessTime_ = 0L;
symlink_ = com.google.protobuf.ByteString.EMPTY;
blockReplication_ = 0;
blocksize_ = 0L;
locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance();
fileId_ = 0L;
childrenNum_ = -1;
fileEncryptionInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance();
storagePolicy_ = 0;
ecPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance();
flags_ = 0;
}
// Memoized initialization state: -1 = not yet computed, 0 = missing a
// required field, 1 = fully initialized.
private byte memoizedIsInitialized = -1;
// True iff all required fields (1-8) are present and every present
// message-typed field is itself initialized.  Result is cached.
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasFileType()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasPath()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasLength()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasPermission()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasOwner()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasGroup()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasModificationTime()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasAccessTime()) {
memoizedIsInitialized = 0;
return false;
}
// permission is required, so it is checked unconditionally; the optional
// message fields below are only validated when present.
if (!getPermission().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
if (hasLocations()) {
if (!getLocations().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasFileEncryptionInfo()) {
if (!getFileEncryptionInfo().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasEcPolicy()) {
if (!getEcPolicy().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
// Serializes every present field in field-number order, then any unknown
// fields.  The hasX() checks are inlined as bitField0_ mask tests, using
// the same masks the parsing constructor set.
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
// Called for its side effect: memoizes nested-message sizes so the
// length-delimited fields below can emit their length prefixes.
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, fileType_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, path_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt64(3, length_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeMessage(4, permission_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeBytes(5, getOwnerBytes());
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeBytes(6, getGroupBytes());
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeUInt64(7, modificationTime_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
output.writeUInt64(8, accessTime_);
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
output.writeBytes(9, symlink_);
}
if (((bitField0_ & 0x00000200) == 0x00000200)) {
output.writeUInt32(10, blockReplication_);
}
if (((bitField0_ & 0x00000400) == 0x00000400)) {
output.writeUInt64(11, blocksize_);
}
if (((bitField0_ & 0x00000800) == 0x00000800)) {
output.writeMessage(12, locations_);
}
if (((bitField0_ & 0x00001000) == 0x00001000)) {
output.writeUInt64(13, fileId_);
}
if (((bitField0_ & 0x00002000) == 0x00002000)) {
output.writeInt32(14, childrenNum_);
}
if (((bitField0_ & 0x00004000) == 0x00004000)) {
output.writeMessage(15, fileEncryptionInfo_);
}
if (((bitField0_ & 0x00008000) == 0x00008000)) {
output.writeUInt32(16, storagePolicy_);
}
if (((bitField0_ & 0x00010000) == 0x00010000)) {
output.writeMessage(17, ecPolicy_);
}
if (((bitField0_ & 0x00020000) == 0x00020000)) {
output.writeUInt32(18, flags_);
}
getUnknownFields().writeTo(output);
}
// Cached serialized size; -1 means not yet computed.
private int memoizedSerializedSize = -1;
// Computes (and memoizes) the exact number of bytes writeTo() will emit:
// the sum of tag+payload sizes for each present field plus unknown fields.
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, fileType_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, path_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, length_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(4, permission_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(5, getOwnerBytes());
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(6, getGroupBytes());
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(7, modificationTime_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(8, accessTime_);
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(9, symlink_);
}
if (((bitField0_ & 0x00000200) == 0x00000200)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(10, blockReplication_);
}
if (((bitField0_ & 0x00000400) == 0x00000400)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(11, blocksize_);
}
if (((bitField0_ & 0x00000800) == 0x00000800)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(12, locations_);
}
if (((bitField0_ & 0x00001000) == 0x00001000)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(13, fileId_);
}
if (((bitField0_ & 0x00002000) == 0x00002000)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(14, childrenNum_);
}
if (((bitField0_ & 0x00004000) == 0x00004000)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(15, fileEncryptionInfo_);
}
if (((bitField0_ & 0x00008000) == 0x00008000)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(16, storagePolicy_);
}
if (((bitField0_ & 0x00010000) == 0x00010000)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(17, ecPolicy_);
}
if (((bitField0_ & 0x00020000) == 0x00020000)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(18, flags_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
// Java serialization hook: delegates to GeneratedMessage's serialized proxy
// so the message is persisted via its protobuf encoding.
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
// Field-by-field equality: two messages are equal when each field's
// presence bit matches and, where present, the values match (messages and
// byte strings via equals(), scalars via ==).  Unknown fields must match
// too.  Kept consistent with hashCode() below.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto) obj;
boolean result = true;
result = result && (hasFileType() == other.hasFileType());
if (hasFileType()) {
result = result &&
(getFileType() == other.getFileType());
}
result = result && (hasPath() == other.hasPath());
if (hasPath()) {
result = result && getPath()
.equals(other.getPath());
}
result = result && (hasLength() == other.hasLength());
if (hasLength()) {
result = result && (getLength()
== other.getLength());
}
result = result && (hasPermission() == other.hasPermission());
if (hasPermission()) {
result = result && getPermission()
.equals(other.getPermission());
}
result = result && (hasOwner() == other.hasOwner());
if (hasOwner()) {
result = result && getOwner()
.equals(other.getOwner());
}
result = result && (hasGroup() == other.hasGroup());
if (hasGroup()) {
result = result && getGroup()
.equals(other.getGroup());
}
result = result && (hasModificationTime() == other.hasModificationTime());
if (hasModificationTime()) {
result = result && (getModificationTime()
== other.getModificationTime());
}
result = result && (hasAccessTime() == other.hasAccessTime());
if (hasAccessTime()) {
result = result && (getAccessTime()
== other.getAccessTime());
}
result = result && (hasSymlink() == other.hasSymlink());
if (hasSymlink()) {
result = result && getSymlink()
.equals(other.getSymlink());
}
result = result && (hasBlockReplication() == other.hasBlockReplication());
if (hasBlockReplication()) {
result = result && (getBlockReplication()
== other.getBlockReplication());
}
result = result && (hasBlocksize() == other.hasBlocksize());
if (hasBlocksize()) {
result = result && (getBlocksize()
== other.getBlocksize());
}
result = result && (hasLocations() == other.hasLocations());
if (hasLocations()) {
result = result && getLocations()
.equals(other.getLocations());
}
result = result && (hasFileId() == other.hasFileId());
if (hasFileId()) {
result = result && (getFileId()
== other.getFileId());
}
result = result && (hasChildrenNum() == other.hasChildrenNum());
if (hasChildrenNum()) {
result = result && (getChildrenNum()
== other.getChildrenNum());
}
result = result && (hasFileEncryptionInfo() == other.hasFileEncryptionInfo());
if (hasFileEncryptionInfo()) {
result = result && getFileEncryptionInfo()
.equals(other.getFileEncryptionInfo());
}
result = result && (hasStoragePolicy() == other.hasStoragePolicy());
if (hasStoragePolicy()) {
result = result && (getStoragePolicy()
== other.getStoragePolicy());
}
result = result && (hasEcPolicy() == other.hasEcPolicy());
if (hasEcPolicy()) {
result = result && getEcPolicy()
.equals(other.getEcPolicy());
}
result = result && (hasFlags() == other.hasFlags());
if (hasFlags()) {
result = result && (getFlags()
== other.getFlags());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
// Lazily computed hash cache; 0 means "not yet computed". Safe to memoize
// because generated protobuf messages are immutable after construction.
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
// Standard protobuf-2 generated hashCode: seed with the descriptor, then
// for each *present* optional/required field mix in the field number
// (x37) and the field value (x53), so absent fields do not affect the hash
// and the result stays consistent with equals().
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasFileType()) {
hash = (37 * hash) + FILETYPE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getFileType());
}
if (hasPath()) {
hash = (37 * hash) + PATH_FIELD_NUMBER;
hash = (53 * hash) + getPath().hashCode();
}
if (hasLength()) {
hash = (37 * hash) + LENGTH_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getLength());
}
if (hasPermission()) {
hash = (37 * hash) + PERMISSION_FIELD_NUMBER;
hash = (53 * hash) + getPermission().hashCode();
}
if (hasOwner()) {
hash = (37 * hash) + OWNER_FIELD_NUMBER;
hash = (53 * hash) + getOwner().hashCode();
}
if (hasGroup()) {
hash = (37 * hash) + GROUP_FIELD_NUMBER;
hash = (53 * hash) + getGroup().hashCode();
}
if (hasModificationTime()) {
hash = (37 * hash) + MODIFICATION_TIME_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getModificationTime());
}
if (hasAccessTime()) {
hash = (37 * hash) + ACCESS_TIME_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getAccessTime());
}
if (hasSymlink()) {
hash = (37 * hash) + SYMLINK_FIELD_NUMBER;
hash = (53 * hash) + getSymlink().hashCode();
}
if (hasBlockReplication()) {
hash = (37 * hash) + BLOCK_REPLICATION_FIELD_NUMBER;
hash = (53 * hash) + getBlockReplication();
}
if (hasBlocksize()) {
hash = (37 * hash) + BLOCKSIZE_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getBlocksize());
}
if (hasLocations()) {
hash = (37 * hash) + LOCATIONS_FIELD_NUMBER;
hash = (53 * hash) + getLocations().hashCode();
}
if (hasFileId()) {
hash = (37 * hash) + FILEID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getFileId());
}
if (hasChildrenNum()) {
hash = (37 * hash) + CHILDRENNUM_FIELD_NUMBER;
hash = (53 * hash) + getChildrenNum();
}
if (hasFileEncryptionInfo()) {
hash = (37 * hash) + FILEENCRYPTIONINFO_FIELD_NUMBER;
hash = (53 * hash) + getFileEncryptionInfo().hashCode();
}
if (hasStoragePolicy()) {
hash = (37 * hash) + STORAGEPOLICY_FIELD_NUMBER;
hash = (53 * hash) + getStoragePolicy();
}
if (hasEcPolicy()) {
hash = (37 * hash) + ECPOLICY_FIELD_NUMBER;
hash = (53 * hash) + getEcPolicy().hashCode();
}
if (hasFlags()) {
hash = (37 * hash) + FLAGS_FIELD_NUMBER;
hash = (53 * hash) + getFlags();
}
// Unknown (unrecognized-on-parse) fields participate too, matching equals().
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// --- Static parse entry points -----------------------------------------
// All overloads delegate to the shared PARSER singleton. The ByteString /
// byte[] variants throw InvalidProtocolBufferException on malformed input;
// the stream variants additionally surface I/O failures as IOException.
// The *WithRegistry overloads resolve extensions while parsing.
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Delimited variants read a varint length prefix first, allowing several
// messages to be read back-to-back from one stream.
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// --- Builder factories --------------------------------------------------
// newBuilder() starts from defaults; newBuilder(prototype) / toBuilder()
// seed the builder with an existing message's field values.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
// Parent-aware variant used internally for nested-builder change
// notifications.
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.HdfsFileStatusProto}
*
*
**
* Status of a file, directory or symlink
* Optionally includes a file's block locations if requested by client on the rpc call.
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder {
// Reflection plumbing: exposes the message descriptor and the field
// accessor table generated for HdfsFileStatusProto.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
// When the runtime always uses nested field builders, eagerly create the
// builders for the message-typed fields so change notifications work.
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getPermissionFieldBuilder();
getLocationsFieldBuilder();
getFileEncryptionInfoFieldBuilder();
getEcPolicyFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
// Resets every field to its proto default and clears all has-bits in
// bitField0_. Note childrenNum resets to -1 (its declared default), not 0.
public Builder clear() {
super.clear();
fileType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.IS_DIR;
bitField0_ = (bitField0_ & ~0x00000001);
path_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000002);
length_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
if (permissionBuilder_ == null) {
permission_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance();
} else {
permissionBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
owner_ = "";
bitField0_ = (bitField0_ & ~0x00000010);
group_ = "";
bitField0_ = (bitField0_ & ~0x00000020);
modificationTime_ = 0L;
bitField0_ = (bitField0_ & ~0x00000040);
accessTime_ = 0L;
bitField0_ = (bitField0_ & ~0x00000080);
symlink_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000100);
blockReplication_ = 0;
bitField0_ = (bitField0_ & ~0x00000200);
blocksize_ = 0L;
bitField0_ = (bitField0_ & ~0x00000400);
if (locationsBuilder_ == null) {
locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance();
} else {
locationsBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000800);
fileId_ = 0L;
bitField0_ = (bitField0_ & ~0x00001000);
childrenNum_ = -1;
bitField0_ = (bitField0_ & ~0x00002000);
if (fileEncryptionInfoBuilder_ == null) {
fileEncryptionInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance();
} else {
fileEncryptionInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00004000);
storagePolicy_ = 0;
bitField0_ = (bitField0_ & ~0x00008000);
if (ecPolicyBuilder_ == null) {
ecPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance();
} else {
ecPolicyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00010000);
flags_ = 0;
bitField0_ = (bitField0_ & ~0x00020000);
return this;
}
// Deep-copies this builder by round-tripping through a partial message.
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance();
}
// Strict build: throws UninitializedMessageException if any required field
// (fileType, path, length, permission, owner, group, times) is unset.
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
// Builds the message without checking required fields: copies each field's
// value into the result and translates the builder's has-bits into the
// message's bitField0_. Message-typed fields are taken from the nested
// builder when one exists, otherwise from the locally held instance.
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.fileType_ = fileType_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.path_ = path_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.length_ = length_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
if (permissionBuilder_ == null) {
result.permission_ = permission_;
} else {
result.permission_ = permissionBuilder_.build();
}
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.owner_ = owner_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000020;
}
result.group_ = group_;
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000040;
}
result.modificationTime_ = modificationTime_;
if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
to_bitField0_ |= 0x00000080;
}
result.accessTime_ = accessTime_;
if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
to_bitField0_ |= 0x00000100;
}
result.symlink_ = symlink_;
if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
to_bitField0_ |= 0x00000200;
}
result.blockReplication_ = blockReplication_;
if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
to_bitField0_ |= 0x00000400;
}
result.blocksize_ = blocksize_;
if (((from_bitField0_ & 0x00000800) == 0x00000800)) {
to_bitField0_ |= 0x00000800;
}
if (locationsBuilder_ == null) {
result.locations_ = locations_;
} else {
result.locations_ = locationsBuilder_.build();
}
if (((from_bitField0_ & 0x00001000) == 0x00001000)) {
to_bitField0_ |= 0x00001000;
}
result.fileId_ = fileId_;
if (((from_bitField0_ & 0x00002000) == 0x00002000)) {
to_bitField0_ |= 0x00002000;
}
result.childrenNum_ = childrenNum_;
if (((from_bitField0_ & 0x00004000) == 0x00004000)) {
to_bitField0_ |= 0x00004000;
}
if (fileEncryptionInfoBuilder_ == null) {
result.fileEncryptionInfo_ = fileEncryptionInfo_;
} else {
result.fileEncryptionInfo_ = fileEncryptionInfoBuilder_.build();
}
if (((from_bitField0_ & 0x00008000) == 0x00008000)) {
to_bitField0_ |= 0x00008000;
}
result.storagePolicy_ = storagePolicy_;
if (((from_bitField0_ & 0x00010000) == 0x00010000)) {
to_bitField0_ |= 0x00010000;
}
if (ecPolicyBuilder_ == null) {
result.ecPolicy_ = ecPolicy_;
} else {
result.ecPolicy_ = ecPolicyBuilder_.build();
}
if (((from_bitField0_ & 0x00020000) == 0x00020000)) {
to_bitField0_ |= 0x00020000;
}
result.flags_ = flags_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
// Generic merge: downcasts to the typed overload when possible, otherwise
// falls back to reflective field-by-field merging in the superclass.
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
// Typed merge: for each field *present* in `other`, overwrite scalars and
// recursively merge message fields; fields absent in `other` are untouched.
// String fields copy the raw Object (String or ByteString) to avoid
// eagerly decoding UTF-8.
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()) return this;
if (other.hasFileType()) {
setFileType(other.getFileType());
}
if (other.hasPath()) {
setPath(other.getPath());
}
if (other.hasLength()) {
setLength(other.getLength());
}
if (other.hasPermission()) {
mergePermission(other.getPermission());
}
if (other.hasOwner()) {
bitField0_ |= 0x00000010;
owner_ = other.owner_;
onChanged();
}
if (other.hasGroup()) {
bitField0_ |= 0x00000020;
group_ = other.group_;
onChanged();
}
if (other.hasModificationTime()) {
setModificationTime(other.getModificationTime());
}
if (other.hasAccessTime()) {
setAccessTime(other.getAccessTime());
}
if (other.hasSymlink()) {
setSymlink(other.getSymlink());
}
if (other.hasBlockReplication()) {
setBlockReplication(other.getBlockReplication());
}
if (other.hasBlocksize()) {
setBlocksize(other.getBlocksize());
}
if (other.hasLocations()) {
mergeLocations(other.getLocations());
}
if (other.hasFileId()) {
setFileId(other.getFileId());
}
if (other.hasChildrenNum()) {
setChildrenNum(other.getChildrenNum());
}
if (other.hasFileEncryptionInfo()) {
mergeFileEncryptionInfo(other.getFileEncryptionInfo());
}
if (other.hasStoragePolicy()) {
setStoragePolicy(other.getStoragePolicy());
}
if (other.hasEcPolicy()) {
mergeEcPolicy(other.getEcPolicy());
}
if (other.hasFlags()) {
setFlags(other.getFlags());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
// True when every `required` field is set and every set message-typed
// field is itself initialized (optional sub-messages only checked if set).
public final boolean isInitialized() {
if (!hasFileType()) {
return false;
}
if (!hasPath()) {
return false;
}
if (!hasLength()) {
return false;
}
if (!hasPermission()) {
return false;
}
if (!hasOwner()) {
return false;
}
if (!hasGroup()) {
return false;
}
if (!hasModificationTime()) {
return false;
}
if (!hasAccessTime()) {
return false;
}
if (!getPermission().isInitialized()) {
return false;
}
if (hasLocations()) {
if (!getLocations().isInitialized()) {
return false;
}
}
if (hasFileEncryptionInfo()) {
if (!getFileEncryptionInfo().isInitialized()) {
return false;
}
}
if (hasEcPolicy()) {
if (!getEcPolicy().isInitialized()) {
return false;
}
}
return true;
}
// Parses a message from the wire and merges it into this builder. On a
// parse failure the partially-read message (if any) is still merged in the
// finally block before the exception propagates, preserving prior state.
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// Presence bitmap: bit N set => field with has-bit N is explicitly set.
private int bitField0_;
// required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType fileType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.IS_DIR;
/**
* required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;
*/
public boolean hasFileType() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType getFileType() {
return fileType_;
}
/**
* required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;
*/
public Builder setFileType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
fileType_ = value;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;
*/
public Builder clearFileType() {
bitField0_ = (bitField0_ & ~0x00000001);
fileType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.IS_DIR;
onChanged();
return this;
}
// required bytes path = 2;
private com.google.protobuf.ByteString path_ = com.google.protobuf.ByteString.EMPTY;
/**
* required bytes path = 2;
*
*
* local name of inode encoded java UTF8
*
*/
public boolean hasPath() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required bytes path = 2;
*
*
* local name of inode encoded java UTF8
*
*/
public com.google.protobuf.ByteString getPath() {
return path_;
}
/**
* required bytes path = 2;
*
*
* local name of inode encoded java UTF8
*
*/
public Builder setPath(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
path_ = value;
onChanged();
return this;
}
/**
* required bytes path = 2;
*
*
* local name of inode encoded java UTF8
*
*/
public Builder clearPath() {
bitField0_ = (bitField0_ & ~0x00000002);
path_ = getDefaultInstance().getPath();
onChanged();
return this;
}
// required uint64 length = 3;
private long length_ ;
/**
* required uint64 length = 3;
*/
public boolean hasLength() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 length = 3;
*/
public long getLength() {
return length_;
}
/**
* required uint64 length = 3;
*/
public Builder setLength(long value) {
bitField0_ |= 0x00000004;
length_ = value;
onChanged();
return this;
}
/**
* required uint64 length = 3;
*/
public Builder clearLength() {
bitField0_ = (bitField0_ & ~0x00000004);
length_ = 0L;
onChanged();
return this;
}
// required .hadoop.hdfs.FsPermissionProto permission = 4;
// Message-typed field: either `permission_` holds the value directly, or a
// nested SingleFieldBuilder owns it (then permission_ is null). Accessors
// below pick whichever representation is active.
private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto permission_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> permissionBuilder_;
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
public boolean hasPermission() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPermission() {
if (permissionBuilder_ == null) {
return permission_;
} else {
return permissionBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
public Builder setPermission(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) {
if (permissionBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
permission_ = value;
onChanged();
} else {
permissionBuilder_.setMessage(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
public Builder setPermission(
org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder builderForValue) {
if (permissionBuilder_ == null) {
permission_ = builderForValue.build();
onChanged();
} else {
permissionBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000008;
return this;
}
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
public Builder mergePermission(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) {
if (permissionBuilder_ == null) {
// Only merge field-by-field when a non-default value is already set;
// otherwise adopt `value` wholesale.
if (((bitField0_ & 0x00000008) == 0x00000008) &&
permission_ != org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance()) {
permission_ =
org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.newBuilder(permission_).mergeFrom(value).buildPartial();
} else {
permission_ = value;
}
onChanged();
} else {
permissionBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
public Builder clearPermission() {
if (permissionBuilder_ == null) {
permission_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance();
onChanged();
} else {
permissionBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder getPermissionBuilder() {
bitField0_ |= 0x00000008;
onChanged();
return getPermissionFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() {
if (permissionBuilder_ != null) {
return permissionBuilder_.getMessageOrBuilder();
} else {
return permission_;
}
}
/**
* required .hadoop.hdfs.FsPermissionProto permission = 4;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder>
getPermissionFieldBuilder() {
if (permissionBuilder_ == null) {
// Lazily switch to builder-backed representation; ownership of the
// current value transfers to the nested builder.
permissionBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder>(
permission_,
getParentForChildren(),
isClean());
permission_ = null;
}
return permissionBuilder_;
}
// required string owner = 5;
// String fields are stored as Object: either a decoded String or the raw
// ByteString from the wire; the getters decode/encode lazily and cache.
private java.lang.Object owner_ = "";
/**
* required string owner = 5;
*/
public boolean hasOwner() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required string owner = 5;
*/
public java.lang.String getOwner() {
java.lang.Object ref = owner_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
owner_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string owner = 5;
*/
public com.google.protobuf.ByteString
getOwnerBytes() {
java.lang.Object ref = owner_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
owner_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string owner = 5;
*/
public Builder setOwner(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000010;
owner_ = value;
onChanged();
return this;
}
/**
* required string owner = 5;
*/
public Builder clearOwner() {
bitField0_ = (bitField0_ & ~0x00000010);
owner_ = getDefaultInstance().getOwner();
onChanged();
return this;
}
/**
* required string owner = 5;
*/
public Builder setOwnerBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000010;
owner_ = value;
onChanged();
return this;
}
// required string group = 6;
private java.lang.Object group_ = "";
/**
* required string group = 6;
*/
public boolean hasGroup() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* required string group = 6;
*/
public java.lang.String getGroup() {
java.lang.Object ref = group_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
group_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string group = 6;
*/
public com.google.protobuf.ByteString
getGroupBytes() {
java.lang.Object ref = group_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
group_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string group = 6;
*/
public Builder setGroup(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000020;
group_ = value;
onChanged();
return this;
}
/**
* required string group = 6;
*/
public Builder clearGroup() {
bitField0_ = (bitField0_ & ~0x00000020);
group_ = getDefaultInstance().getGroup();
onChanged();
return this;
}
/**
* required string group = 6;
*/
public Builder setGroupBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000020;
group_ = value;
onChanged();
return this;
}
// required uint64 modification_time = 7;
// NOTE(review): units not stated here; presumably milliseconds since epoch
// as elsewhere in HDFS — confirm against the .proto / callers.
private long modificationTime_ ;
/**
* required uint64 modification_time = 7;
*/
public boolean hasModificationTime() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* required uint64 modification_time = 7;
*/
public long getModificationTime() {
return modificationTime_;
}
/**
* required uint64 modification_time = 7;
*/
public Builder setModificationTime(long value) {
bitField0_ |= 0x00000040;
modificationTime_ = value;
onChanged();
return this;
}
/**
* required uint64 modification_time = 7;
*/
public Builder clearModificationTime() {
bitField0_ = (bitField0_ & ~0x00000040);
modificationTime_ = 0L;
onChanged();
return this;
}
// required uint64 access_time = 8;
private long accessTime_ ;
/**
* required uint64 access_time = 8;
*/
public boolean hasAccessTime() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* required uint64 access_time = 8;
*/
public long getAccessTime() {
return accessTime_;
}
/**
* required uint64 access_time = 8;
*/
public Builder setAccessTime(long value) {
bitField0_ |= 0x00000080;
accessTime_ = value;
onChanged();
return this;
}
/**
* required uint64 access_time = 8;
*/
public Builder clearAccessTime() {
bitField0_ = (bitField0_ & ~0x00000080);
accessTime_ = 0L;
onChanged();
return this;
}
// optional bytes symlink = 9;
private com.google.protobuf.ByteString symlink_ = com.google.protobuf.ByteString.EMPTY;
/**
* optional bytes symlink = 9;
*
*
* Optional fields for symlink
*
*/
public boolean hasSymlink() {
return ((bitField0_ & 0x00000100) == 0x00000100);
}
/**
* optional bytes symlink = 9;
*
*
* Optional fields for symlink
*
*/
public com.google.protobuf.ByteString getSymlink() {
return symlink_;
}
/**
* optional bytes symlink = 9;
*
*
* Optional fields for symlink
*
*/
public Builder setSymlink(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000100;
symlink_ = value;
onChanged();
return this;
}
/**
* optional bytes symlink = 9;
*
*
* Optional fields for symlink
*
*/
public Builder clearSymlink() {
bitField0_ = (bitField0_ & ~0x00000100);
symlink_ = getDefaultInstance().getSymlink();
onChanged();
return this;
}
// optional uint32 block_replication = 10 [default = 0];
// Scalar optional fields: presence is tracked solely via bitField0_; the
// backing field keeps its default when unset.
private int blockReplication_ ;
/**
* optional uint32 block_replication = 10 [default = 0];
*
*
* Optional fields for file
*
*/
public boolean hasBlockReplication() {
return ((bitField0_ & 0x00000200) == 0x00000200);
}
/**
* optional uint32 block_replication = 10 [default = 0];
*
*
* Optional fields for file
*
*/
public int getBlockReplication() {
return blockReplication_;
}
/**
* optional uint32 block_replication = 10 [default = 0];
*
*
* Optional fields for file
*
*/
public Builder setBlockReplication(int value) {
bitField0_ |= 0x00000200;
blockReplication_ = value;
onChanged();
return this;
}
/**
* optional uint32 block_replication = 10 [default = 0];
*
*
* Optional fields for file
*
*/
public Builder clearBlockReplication() {
bitField0_ = (bitField0_ & ~0x00000200);
blockReplication_ = 0;
onChanged();
return this;
}
// optional uint64 blocksize = 11 [default = 0];
private long blocksize_ ;
/**
* optional uint64 blocksize = 11 [default = 0];
*/
public boolean hasBlocksize() {
return ((bitField0_ & 0x00000400) == 0x00000400);
}
/**
* optional uint64 blocksize = 11 [default = 0];
*/
public long getBlocksize() {
return blocksize_;
}
/**
* optional uint64 blocksize = 11 [default = 0];
*/
public Builder setBlocksize(long value) {
bitField0_ |= 0x00000400;
blocksize_ = value;
onChanged();
return this;
}
/**
* optional uint64 blocksize = 11 [default = 0];
*/
public Builder clearBlocksize() {
bitField0_ = (bitField0_ & ~0x00000400);
blocksize_ = 0L;
onChanged();
return this;
}
// optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
// Same dual representation as `permission`: plain value or nested builder.
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder> locationsBuilder_;
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* suppled only if asked by client
*
*/
public boolean hasLocations() {
return ((bitField0_ & 0x00000800) == 0x00000800);
}
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* suppled only if asked by client
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations() {
if (locationsBuilder_ == null) {
return locations_;
} else {
return locationsBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* suppled only if asked by client
*
*/
public Builder setLocations(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto value) {
if (locationsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
locations_ = value;
onChanged();
} else {
locationsBuilder_.setMessage(value);
}
bitField0_ |= 0x00000800;
return this;
}
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* suppled only if asked by client
*
*/
public Builder setLocations(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder builderForValue) {
if (locationsBuilder_ == null) {
locations_ = builderForValue.build();
onChanged();
} else {
locationsBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000800;
return this;
}
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* suppled only if asked by client
*
*/
public Builder mergeLocations(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto value) {
if (locationsBuilder_ == null) {
// Merge into the existing value only when it is set and non-default.
if (((bitField0_ & 0x00000800) == 0x00000800) &&
locations_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance()) {
locations_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.newBuilder(locations_).mergeFrom(value).buildPartial();
} else {
locations_ = value;
}
onChanged();
} else {
locationsBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000800;
return this;
}
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* suppled only if asked by client
*
*/
public Builder clearLocations() {
if (locationsBuilder_ == null) {
locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance();
onChanged();
} else {
locationsBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000800);
return this;
}
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* suppled only if asked by client
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder getLocationsBuilder() {
bitField0_ |= 0x00000800;
onChanged();
return getLocationsFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* suppled only if asked by client
*
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder() {
if (locationsBuilder_ != null) {
return locationsBuilder_.getMessageOrBuilder();
} else {
return locations_;
}
}
/**
* optional .hadoop.hdfs.LocatedBlocksProto locations = 12;
*
*
* suppled only if asked by client
*
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder>
getLocationsFieldBuilder() {
if (locationsBuilder_ == null) {
// Lazily switch to builder-backed representation.
locationsBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder>(
locations_,
getParentForChildren(),
isClean());
locations_ = null;
}
return locationsBuilder_;
}
// --- Field 13: optional uint64 fileId (presence bit 0x00001000, default 0). ---
// optional uint64 fileId = 13 [default = 0];
private long fileId_ ;
/**
 * optional uint64 fileId = 13 [default = 0];
 *
 * Optional field for fileId
 */
public boolean hasFileId() {
return ((bitField0_ & 0x00001000) == 0x00001000);
}
/**
 * optional uint64 fileId = 13 [default = 0];
 *
 * Optional field for fileId
 */
public long getFileId() {
return fileId_;
}
/**
 * optional uint64 fileId = 13 [default = 0];
 *
 * Optional field for fileId
 */
public Builder setFileId(long value) {
bitField0_ |= 0x00001000;
fileId_ = value;
onChanged();
return this;
}
/**
 * optional uint64 fileId = 13 [default = 0];
 *
 * Optional field for fileId
 */
public Builder clearFileId() {
bitField0_ = (bitField0_ & ~0x00001000);
fileId_ = 0L;
onChanged();
return this;
}
// --- Field 14: optional int32 childrenNum (presence bit 0x00002000, default -1). ---
// optional int32 childrenNum = 14 [default = -1];
private int childrenNum_ = -1;
/**
 * optional int32 childrenNum = 14 [default = -1];
 */
public boolean hasChildrenNum() {
return ((bitField0_ & 0x00002000) == 0x00002000);
}
/**
 * optional int32 childrenNum = 14 [default = -1];
 */
public int getChildrenNum() {
return childrenNum_;
}
/**
 * optional int32 childrenNum = 14 [default = -1];
 */
public Builder setChildrenNum(int value) {
bitField0_ |= 0x00002000;
childrenNum_ = value;
onChanged();
return this;
}
/**
 * optional int32 childrenNum = 14 [default = -1];
 */
public Builder clearChildrenNum() {
bitField0_ = (bitField0_ & ~0x00002000);
// Reset to the declared proto default (-1), not Java's zero default.
childrenNum_ = -1;
onChanged();
return this;
}
// --- Field 15: optional FileEncryptionInfoProto fileEncryptionInfo (presence bit 0x00004000). ---
// optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto fileEncryptionInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder> fileEncryptionInfoBuilder_;
/**
 * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
 *
 * Optional field for file encryption
 */
public boolean hasFileEncryptionInfo() {
return ((bitField0_ & 0x00004000) == 0x00004000);
}
/**
 * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
 *
 * Optional field for file encryption
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo() {
if (fileEncryptionInfoBuilder_ == null) {
return fileEncryptionInfo_;
} else {
return fileEncryptionInfoBuilder_.getMessage();
}
}
/**
 * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
 *
 * Optional field for file encryption
 */
public Builder setFileEncryptionInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto value) {
if (fileEncryptionInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
fileEncryptionInfo_ = value;
onChanged();
} else {
fileEncryptionInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00004000;
return this;
}
/**
 * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
 *
 * Optional field for file encryption
 */
public Builder setFileEncryptionInfo(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder builderForValue) {
if (fileEncryptionInfoBuilder_ == null) {
fileEncryptionInfo_ = builderForValue.build();
onChanged();
} else {
fileEncryptionInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00004000;
return this;
}
/**
 * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
 *
 * Optional field for file encryption
 */
public Builder mergeFileEncryptionInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto value) {
if (fileEncryptionInfoBuilder_ == null) {
// Merge only when a non-default value is already present; otherwise adopt.
if (((bitField0_ & 0x00004000) == 0x00004000) &&
fileEncryptionInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance()) {
fileEncryptionInfo_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.newBuilder(fileEncryptionInfo_).mergeFrom(value).buildPartial();
} else {
fileEncryptionInfo_ = value;
}
onChanged();
} else {
fileEncryptionInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00004000;
return this;
}
/**
 * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
 *
 * Optional field for file encryption
 */
public Builder clearFileEncryptionInfo() {
if (fileEncryptionInfoBuilder_ == null) {
fileEncryptionInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance();
onChanged();
} else {
fileEncryptionInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00004000);
return this;
}
/**
 * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
 *
 * Optional field for file encryption
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder getFileEncryptionInfoBuilder() {
bitField0_ |= 0x00004000;
onChanged();
return getFileEncryptionInfoFieldBuilder().getBuilder();
}
/**
 * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
 *
 * Optional field for file encryption
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder() {
if (fileEncryptionInfoBuilder_ != null) {
return fileEncryptionInfoBuilder_.getMessageOrBuilder();
} else {
return fileEncryptionInfo_;
}
}
/**
 * optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;
 *
 * Optional field for file encryption
 *
 * Lazily creates the nested-message field builder; once created, the plain
 * fileEncryptionInfo_ field is nulled and the builder is the source of truth.
 */
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder>
getFileEncryptionInfoFieldBuilder() {
if (fileEncryptionInfoBuilder_ == null) {
fileEncryptionInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder>(
fileEncryptionInfo_,
getParentForChildren(),
isClean());
fileEncryptionInfo_ = null;
}
return fileEncryptionInfoBuilder_;
}
// --- Field 16: optional uint32 storagePolicy (presence bit 0x00008000, default 0). ---
// optional uint32 storagePolicy = 16 [default = 0];
private int storagePolicy_ ;
/**
 * optional uint32 storagePolicy = 16 [default = 0];
 *
 * block storage policy id
 */
public boolean hasStoragePolicy() {
return ((bitField0_ & 0x00008000) == 0x00008000);
}
/**
 * optional uint32 storagePolicy = 16 [default = 0];
 *
 * block storage policy id
 */
public int getStoragePolicy() {
return storagePolicy_;
}
/**
 * optional uint32 storagePolicy = 16 [default = 0];
 *
 * block storage policy id
 */
public Builder setStoragePolicy(int value) {
bitField0_ |= 0x00008000;
storagePolicy_ = value;
onChanged();
return this;
}
/**
 * optional uint32 storagePolicy = 16 [default = 0];
 *
 * block storage policy id
 */
public Builder clearStoragePolicy() {
bitField0_ = (bitField0_ & ~0x00008000);
storagePolicy_ = 0;
onChanged();
return this;
}
// --- Field 17: optional ErasureCodingPolicyProto ecPolicy (presence bit 0x00010000). ---
// optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto ecPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> ecPolicyBuilder_;
/**
 * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;
 *
 * Optional field for erasure coding
 */
public boolean hasEcPolicy() {
return ((bitField0_ & 0x00010000) == 0x00010000);
}
/**
 * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;
 *
 * Optional field for erasure coding
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy() {
if (ecPolicyBuilder_ == null) {
return ecPolicy_;
} else {
return ecPolicyBuilder_.getMessage();
}
}
/**
 * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;
 *
 * Optional field for erasure coding
 */
public Builder setEcPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) {
if (ecPolicyBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ecPolicy_ = value;
onChanged();
} else {
ecPolicyBuilder_.setMessage(value);
}
bitField0_ |= 0x00010000;
return this;
}
/**
 * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;
 *
 * Optional field for erasure coding
 */
public Builder setEcPolicy(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder builderForValue) {
if (ecPolicyBuilder_ == null) {
ecPolicy_ = builderForValue.build();
onChanged();
} else {
ecPolicyBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00010000;
return this;
}
/**
 * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;
 *
 * Optional field for erasure coding
 */
public Builder mergeEcPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) {
if (ecPolicyBuilder_ == null) {
// Merge only when a non-default value is already present; otherwise adopt.
if (((bitField0_ & 0x00010000) == 0x00010000) &&
ecPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance()) {
ecPolicy_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.newBuilder(ecPolicy_).mergeFrom(value).buildPartial();
} else {
ecPolicy_ = value;
}
onChanged();
} else {
ecPolicyBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00010000;
return this;
}
/**
 * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;
 *
 * Optional field for erasure coding
 */
public Builder clearEcPolicy() {
if (ecPolicyBuilder_ == null) {
ecPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance();
onChanged();
} else {
ecPolicyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00010000);
return this;
}
/**
 * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;
 *
 * Optional field for erasure coding
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder getEcPolicyBuilder() {
bitField0_ |= 0x00010000;
onChanged();
return getEcPolicyFieldBuilder().getBuilder();
}
/**
 * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;
 *
 * Optional field for erasure coding
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder() {
if (ecPolicyBuilder_ != null) {
return ecPolicyBuilder_.getMessageOrBuilder();
} else {
return ecPolicy_;
}
}
/**
 * optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;
 *
 * Optional field for erasure coding
 *
 * Lazily creates the nested-message field builder; once created, the plain
 * ecPolicy_ field is nulled and the builder becomes the source of truth.
 */
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder>
getEcPolicyFieldBuilder() {
if (ecPolicyBuilder_ == null) {
ecPolicyBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder>(
ecPolicy_,
getParentForChildren(),
isClean());
ecPolicy_ = null;
}
return ecPolicyBuilder_;
}
// --- Field 18: optional uint32 flags (presence bit 0x00020000, default 0). ---
// optional uint32 flags = 18 [default = 0];
private int flags_ ;
/**
 * optional uint32 flags = 18 [default = 0];
 *
 * Set of flags
 */
public boolean hasFlags() {
return ((bitField0_ & 0x00020000) == 0x00020000);
}
/**
 * optional uint32 flags = 18 [default = 0];
 *
 * Set of flags
 */
public int getFlags() {
return flags_;
}
/**
 * optional uint32 flags = 18 [default = 0];
 *
 * Set of flags
 */
public Builder setFlags(int value) {
bitField0_ |= 0x00020000;
flags_ = value;
onChanged();
return this;
}
/**
 * optional uint32 flags = 18 [default = 0];
 *
 * Set of flags
 */
public Builder clearFlags() {
bitField0_ = (bitField0_ & ~0x00020000);
flags_ = 0;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.HdfsFileStatusProto)
}
// Eagerly creates the shared singleton default instance; initFields() resets
// every field to its declared proto default.
static {
defaultInstance = new HdfsFileStatusProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.HdfsFileStatusProto)
}
/**
 * Read-only accessor interface shared by {@code BlockChecksumOptionsProto}
 * and its Builder. Generated from hdfs.proto.
 */
public interface BlockChecksumOptionsProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC];
/**
 * optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC];
 */
boolean hasBlockChecksumType();
/**
 * optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC];
 */
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto getBlockChecksumType();
// optional uint64 stripeLength = 2;
/**
 * optional uint64 stripeLength = 2;
 *
 * Only used if blockChecksumType specifies a striped format, such as
 * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
 * to be the concatenation of N crcs, where
 * N == ((requestedLength - 1) / stripeLength) + 1
 */
boolean hasStripeLength();
/**
 * optional uint64 stripeLength = 2;
 *
 * Only used if blockChecksumType specifies a striped format, such as
 * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
 * to be the concatenation of N crcs, where
 * N == ((requestedLength - 1) / stripeLength) + 1
 */
long getStripeLength();
}
/**
* Protobuf type {@code hadoop.hdfs.BlockChecksumOptionsProto}
*
*
**
* Algorithms/types denoting how block-level checksums are computed using
* lower-level chunk checksums/CRCs.
* These options should be kept in sync with
* org.apache.hadoop.hdfs.protocol.BlockChecksumOptions.
*
*/
public static final class BlockChecksumOptionsProto extends
com.google.protobuf.GeneratedMessage
implements BlockChecksumOptionsProtoOrBuilder {
// Use BlockChecksumOptionsProto.newBuilder() to construct.
// FIX: the wildcard type argument on GeneratedMessage.Builder was lost during
// HTML extraction ("Builder>" is not valid Java); restored to "Builder<?>"
// as emitted by protoc for all GeneratedMessage subclasses.
private BlockChecksumOptionsProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
// Lightweight constructor for the singleton default instance (noInit path).
private BlockChecksumOptionsProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final BlockChecksumOptionsProto defaultInstance;
/** Returns the shared immutable default instance. */
public static BlockChecksumOptionsProto getDefaultInstance() {
return defaultInstance;
}
public BlockChecksumOptionsProto getDefaultInstanceForType() {
return defaultInstance;
}
// Fields the parser could not recognize, preserved for round-tripping.
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
// Wire-format parsing constructor invoked by PARSER.parsePartialFrom().
// NOTE: the `case 0` (end-of-stream) and `default` (unknown field) branches
// precede `case 8`/`case 16`; Java switch case order is irrelevant, so this
// is behaviorally identical to the conventional ordering.
private BlockChecksumOptionsProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
// Field 1 (blockChecksumType, varint): unrecognized enum numbers are
// preserved in the unknown-field set rather than dropped.
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
blockChecksumType_ = value;
}
break;
}
case 16: {
// Field 2 (stripeLength, varint uint64).
bitField0_ |= 0x00000002;
stripeLength_ = input.readUInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
// Always freeze whatever was parsed, even on failure paths.
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
/** Returns the message's protobuf descriptor. */
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockChecksumOptionsProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockChecksumOptionsProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder.class);
}
// FIX: the <BlockChecksumOptionsProto> type arguments on Parser/AbstractParser
// were lost during HTML extraction (raw types here fail the generated
// getParserForType() override); restored as emitted by protoc.
public static com.google.protobuf.Parser<BlockChecksumOptionsProto> PARSER =
new com.google.protobuf.AbstractParser<BlockChecksumOptionsProto>() {
public BlockChecksumOptionsProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new BlockChecksumOptionsProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<BlockChecksumOptionsProto> getParserForType() {
return PARSER;
}
// Presence bits: 0x1 = blockChecksumType, 0x2 = stripeLength.
private int bitField0_;
// optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC];
public static final int BLOCKCHECKSUMTYPE_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto blockChecksumType_;
/**
 * optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC];
 */
public boolean hasBlockChecksumType() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
 * optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC];
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto getBlockChecksumType() {
return blockChecksumType_;
}
// optional uint64 stripeLength = 2;
public static final int STRIPELENGTH_FIELD_NUMBER = 2;
private long stripeLength_;
/**
 * optional uint64 stripeLength = 2;
 *
 * Only used if blockChecksumType specifies a striped format, such as
 * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
 * to be the concatenation of N crcs, where
 * N == ((requestedLength - 1) / stripeLength) + 1
 */
public boolean hasStripeLength() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
 * optional uint64 stripeLength = 2;
 *
 * Only used if blockChecksumType specifies a striped format, such as
 * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
 * to be the concatenation of N crcs, where
 * N == ((requestedLength - 1) / stripeLength) + 1
 */
public long getStripeLength() {
return stripeLength_;
}
// Resets fields to their declared proto defaults.
private void initFields() {
blockChecksumType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto.MD5CRC;
stripeLength_ = 0L;
}
private byte memoizedIsInitialized = -1;
// Always initialized: the message has no required fields.
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
// Serializes only the fields whose presence bits are set, then unknown fields.
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, blockChecksumType_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, stripeLength_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
// Computes and memoizes the byte size of the serialized message.
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, blockChecksumType_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, stripeLength_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
// Structural equality: same presence bits, same field values, same unknown fields.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto) obj;
boolean result = true;
result = result && (hasBlockChecksumType() == other.hasBlockChecksumType());
if (hasBlockChecksumType()) {
result = result &&
(getBlockChecksumType() == other.getBlockChecksumType());
}
result = result && (hasStripeLength() == other.hasStripeLength());
if (hasStripeLength()) {
result = result && (getStripeLength()
== other.getStripeLength());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
// Hash mixes descriptor, set fields (keyed by field number), and unknown
// fields; memoized since the message is immutable.
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasBlockChecksumType()) {
hash = (37 * hash) + BLOCKCHECKSUMTYPE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getBlockChecksumType());
}
if (hasStripeLength()) {
hash = (37 * hash) + STRIPELENGTH_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getStripeLength());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// Standard generated parse entry points; all delegate to PARSER.
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Delimited variants read a varint length prefix before the message bytes.
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Builder factories: newBuilder(prototype) pre-populates from an existing message.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.BlockChecksumOptionsProto}
*
*
**
* Algorithms/types denoting how block-level checksums are computed using
* lower-level chunk checksums/CRCs.
* These options should be kept in sync with
* org.apache.hadoop.hdfs.protocol.BlockChecksumOptions.
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder {
/** Returns the protobuf descriptor shared with the message class. */
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockChecksumOptionsProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockChecksumOptionsProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
// No nested-message field builders to force-initialize for this message.
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
// Resets both fields to proto defaults and clears their presence bits.
public Builder clear() {
super.clear();
blockChecksumType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto.MD5CRC;
bitField0_ = (bitField0_ & ~0x00000001);
stripeLength_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockChecksumOptionsProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance();
}
// build() enforces initialization; always succeeds here (no required fields).
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
// Copies builder state (values + presence bits) into a new immutable message.
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.blockChecksumType_ = blockChecksumType_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.stripeLength_ = stripeLength_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
// Field-wise merge: only fields set in `other` overwrite this builder.
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance()) return this;
if (other.hasBlockChecksumType()) {
setBlockChecksumType(other.getBlockChecksumType());
}
if (other.hasStripeLength()) {
setStripeLength(other.getStripeLength());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
// Parses from a stream; on failure, merges the partial message before rethrowing.
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// Presence bits: 0x1 = blockChecksumType, 0x2 = stripeLength.
private int bitField0_;
// optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC];
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto blockChecksumType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto.MD5CRC;
/**
 * optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC];
 */
public boolean hasBlockChecksumType() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
 * optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC];
 */
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto getBlockChecksumType() {
return blockChecksumType_;
}
/**
 * optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC];
 */
public Builder setBlockChecksumType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
blockChecksumType_ = value;
onChanged();
return this;
}
/**
 * optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC];
 */
public Builder clearBlockChecksumType() {
bitField0_ = (bitField0_ & ~0x00000001);
blockChecksumType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto.MD5CRC;
onChanged();
return this;
}
// optional uint64 stripeLength = 2;
private long stripeLength_ ;
/**
 * optional uint64 stripeLength = 2;
 *
 * Only used if blockChecksumType specifies a striped format, such as
 * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
 * to be the concatenation of N crcs, where
 * N == ((requestedLength - 1) / stripeLength) + 1
 */
public boolean hasStripeLength() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
 * optional uint64 stripeLength = 2;
 *
 * Only used if blockChecksumType specifies a striped format, such as
 * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
 * to be the concatenation of N crcs, where
 * N == ((requestedLength - 1) / stripeLength) + 1
 */
public long getStripeLength() {
return stripeLength_;
}
/**
 * optional uint64 stripeLength = 2;
 *
 * Only used if blockChecksumType specifies a striped format, such as
 * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
 * to be the concatenation of N crcs, where
 * N == ((requestedLength - 1) / stripeLength) + 1
 */
public Builder setStripeLength(long value) {
bitField0_ |= 0x00000002;
stripeLength_ = value;
onChanged();
return this;
}
/**
* optional uint64 stripeLength = 2;
*
*
* Only used if blockChecksumType specifies a striped format, such as
* COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
* to be the concatenation of N crcs, where
* N == ((requestedLength - 1) / stripedLength) + 1
*
*/
public Builder clearStripeLength() {
bitField0_ = (bitField0_ & ~0x00000002);
stripeLength_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlockChecksumOptionsProto)
}
// Eagerly create the shared default instance and populate its field defaults.
static {
defaultInstance = new BlockChecksumOptionsProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.BlockChecksumOptionsProto)
}
/**
 * Accessor interface shared by {@code FsServerDefaultsProto} and its Builder.
 * Exposes a has/get pair per proto field, plus a raw-bytes getter for the
 * string field {@code keyProviderUri}.
 */
public interface FsServerDefaultsProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required uint64 blockSize = 1;
/**
* required uint64 blockSize = 1;
*/
boolean hasBlockSize();
/**
* required uint64 blockSize = 1;
*/
long getBlockSize();
// required uint32 bytesPerChecksum = 2;
/**
* required uint32 bytesPerChecksum = 2;
*/
boolean hasBytesPerChecksum();
/**
* required uint32 bytesPerChecksum = 2;
*/
int getBytesPerChecksum();
// required uint32 writePacketSize = 3;
/**
* required uint32 writePacketSize = 3;
*/
boolean hasWritePacketSize();
/**
* required uint32 writePacketSize = 3;
*/
int getWritePacketSize();
// required uint32 replication = 4;
/**
* required uint32 replication = 4;
*
*
* Actually a short - only 16 bits used
*
*/
boolean hasReplication();
/**
* required uint32 replication = 4;
*
*
* Actually a short - only 16 bits used
*
*/
int getReplication();
// required uint32 fileBufferSize = 5;
/**
* required uint32 fileBufferSize = 5;
*/
boolean hasFileBufferSize();
/**
* required uint32 fileBufferSize = 5;
*/
int getFileBufferSize();
// optional bool encryptDataTransfer = 6 [default = false];
/**
* optional bool encryptDataTransfer = 6 [default = false];
*/
boolean hasEncryptDataTransfer();
/**
* optional bool encryptDataTransfer = 6 [default = false];
*/
boolean getEncryptDataTransfer();
// optional uint64 trashInterval = 7 [default = 0];
/**
* optional uint64 trashInterval = 7 [default = 0];
*/
boolean hasTrashInterval();
/**
* optional uint64 trashInterval = 7 [default = 0];
*/
long getTrashInterval();
// optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
/**
* optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
*/
boolean hasChecksumType();
/**
* optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getChecksumType();
// optional string keyProviderUri = 9;
/**
* optional string keyProviderUri = 9;
*/
boolean hasKeyProviderUri();
/**
* optional string keyProviderUri = 9;
*/
java.lang.String getKeyProviderUri();
/**
* optional string keyProviderUri = 9;
*/
com.google.protobuf.ByteString
getKeyProviderUriBytes();
// optional uint32 policyId = 10 [default = 0];
/**
* optional uint32 policyId = 10 [default = 0];
*/
boolean hasPolicyId();
/**
* optional uint32 policyId = 10 [default = 0];
*/
int getPolicyId();
}
/**
* Protobuf type {@code hadoop.hdfs.FsServerDefaultsProto}
*
*
**
* HDFS Server Defaults
*
*/
public static final class FsServerDefaultsProto extends
com.google.protobuf.GeneratedMessage
implements FsServerDefaultsProtoOrBuilder {
// Use FsServerDefaultsProto.newBuilder() to construct.
// NOTE(review): the wildcard type argument below read "Builder>" in this copy
// of the file — the "<?" was stripped by an HTML/text extraction step.
// Restored to the exact form the protocol buffer compiler emits.
private FsServerDefaultsProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
// Backs the singleton default instance; field defaults are assigned later by
// initFields() from the static initializer.
private FsServerDefaultsProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final FsServerDefaultsProto defaultInstance;
public static FsServerDefaultsProto getDefaultInstance() {
return defaultInstance;
}
public FsServerDefaultsProto getDefaultInstanceForType() {
return defaultInstance;
}
// Fields the parser did not recognize, preserved for reserialization.
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
// Wire-format parsing constructor: reads tag/value pairs until end of stream
// (tag 0). Unrecognized tags and out-of-range enum values are preserved in
// unknownFields rather than dropped.
private FsServerDefaultsProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
// Note: Java evaluates switch by matching case labels, so placing the
// default clause before the numbered cases does not change behavior.
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
blockSize_ = input.readUInt64();
break;
}
case 16: {
bitField0_ |= 0x00000002;
bytesPerChecksum_ = input.readUInt32();
break;
}
case 24: {
bitField0_ |= 0x00000004;
writePacketSize_ = input.readUInt32();
break;
}
case 32: {
bitField0_ |= 0x00000008;
replication_ = input.readUInt32();
break;
}
case 40: {
bitField0_ |= 0x00000010;
fileBufferSize_ = input.readUInt32();
break;
}
case 48: {
bitField0_ |= 0x00000020;
encryptDataTransfer_ = input.readBool();
break;
}
case 56: {
bitField0_ |= 0x00000040;
trashInterval_ = input.readUInt64();
break;
}
case 64: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.valueOf(rawValue);
if (value == null) {
// Unknown enum number: keep the raw varint so it round-trips.
unknownFields.mergeVarintField(8, rawValue);
} else {
bitField0_ |= 0x00000080;
checksumType_ = value;
}
break;
}
case 74: {
bitField0_ |= 0x00000100;
keyProviderUri_ = input.readBytes();
break;
}
case 80: {
bitField0_ |= 0x00000200;
policyId_ = input.readUInt32();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
// Always freeze what was parsed, even on failure, so the partially
// parsed message attached to the exception is usable.
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.Builder.class);
}
// NOTE(review): restored the <FsServerDefaultsProto> type arguments on
// Parser/AbstractParser that were stripped by an HTML/text extraction of
// this generated file; the raw types compile only with unchecked warnings
// and deviate from the protocol buffer compiler's output.
public static com.google.protobuf.Parser<FsServerDefaultsProto> PARSER =
new com.google.protobuf.AbstractParser<FsServerDefaultsProto>() {
public FsServerDefaultsProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new FsServerDefaultsProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<FsServerDefaultsProto> getParserForType() {
return PARSER;
}
// Presence bitmap for the ten fields, in field-number order: bit 0x1 =
// blockSize ... bit 0x200 = policyId.
private int bitField0_;
// required uint64 blockSize = 1;
public static final int BLOCKSIZE_FIELD_NUMBER = 1;
private long blockSize_;
/**
* required uint64 blockSize = 1;
*/
public boolean hasBlockSize() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint64 blockSize = 1;
*/
public long getBlockSize() {
return blockSize_;
}
// required uint32 bytesPerChecksum = 2;
public static final int BYTESPERCHECKSUM_FIELD_NUMBER = 2;
private int bytesPerChecksum_;
/**
* required uint32 bytesPerChecksum = 2;
*/
public boolean hasBytesPerChecksum() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint32 bytesPerChecksum = 2;
*/
public int getBytesPerChecksum() {
return bytesPerChecksum_;
}
// required uint32 writePacketSize = 3;
public static final int WRITEPACKETSIZE_FIELD_NUMBER = 3;
private int writePacketSize_;
/**
* required uint32 writePacketSize = 3;
*/
public boolean hasWritePacketSize() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint32 writePacketSize = 3;
*/
public int getWritePacketSize() {
return writePacketSize_;
}
// required uint32 replication = 4;
public static final int REPLICATION_FIELD_NUMBER = 4;
private int replication_;
/**
* required uint32 replication = 4;
*
*
* Actually a short - only 16 bits used
*
*/
public boolean hasReplication() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required uint32 replication = 4;
*
*
* Actually a short - only 16 bits used
*
*/
public int getReplication() {
return replication_;
}
// required uint32 fileBufferSize = 5;
public static final int FILEBUFFERSIZE_FIELD_NUMBER = 5;
private int fileBufferSize_;
/**
* required uint32 fileBufferSize = 5;
*/
public boolean hasFileBufferSize() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required uint32 fileBufferSize = 5;
*/
public int getFileBufferSize() {
return fileBufferSize_;
}
// optional bool encryptDataTransfer = 6 [default = false];
public static final int ENCRYPTDATATRANSFER_FIELD_NUMBER = 6;
private boolean encryptDataTransfer_;
/**
* optional bool encryptDataTransfer = 6 [default = false];
*/
public boolean hasEncryptDataTransfer() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional bool encryptDataTransfer = 6 [default = false];
*/
public boolean getEncryptDataTransfer() {
return encryptDataTransfer_;
}
// optional uint64 trashInterval = 7 [default = 0];
public static final int TRASHINTERVAL_FIELD_NUMBER = 7;
private long trashInterval_;
/**
* optional uint64 trashInterval = 7 [default = 0];
*/
public boolean hasTrashInterval() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional uint64 trashInterval = 7 [default = 0];
*/
public long getTrashInterval() {
return trashInterval_;
}
// optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
public static final int CHECKSUMTYPE_FIELD_NUMBER = 8;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto checksumType_;
/**
* optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
*/
public boolean hasChecksumType() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getChecksumType() {
return checksumType_;
}
// optional string keyProviderUri = 9;
public static final int KEYPROVIDERURI_FIELD_NUMBER = 9;
// Holds either a ByteString (as parsed from the wire) or a String once the
// UTF-8 decoding has been cached by getKeyProviderUri().
private java.lang.Object keyProviderUri_;
/**
* optional string keyProviderUri = 9;
*/
public boolean hasKeyProviderUri() {
return ((bitField0_ & 0x00000100) == 0x00000100);
}
/**
* optional string keyProviderUri = 9;
*/
public java.lang.String getKeyProviderUri() {
java.lang.Object ref = keyProviderUri_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
// Cache the decoded form only when the bytes are valid UTF-8, so an
// invalid payload keeps round-tripping as the original bytes.
if (bs.isValidUtf8()) {
keyProviderUri_ = s;
}
return s;
}
}
/**
* optional string keyProviderUri = 9;
*/
public com.google.protobuf.ByteString
getKeyProviderUriBytes() {
java.lang.Object ref = keyProviderUri_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
keyProviderUri_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// optional uint32 policyId = 10 [default = 0];
public static final int POLICYID_FIELD_NUMBER = 10;
private int policyId_;
/**
* optional uint32 policyId = 10 [default = 0];
*/
public boolean hasPolicyId() {
return ((bitField0_ & 0x00000200) == 0x00000200);
}
/**
* optional uint32 policyId = 10 [default = 0];
*/
public int getPolicyId() {
return policyId_;
}
// Resets every field to its proto-declared default value.
private void initFields() {
blockSize_ = 0L;
bytesPerChecksum_ = 0;
writePacketSize_ = 0;
replication_ = 0;
fileBufferSize_ = 0;
encryptDataTransfer_ = false;
trashInterval_ = 0L;
checksumType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32;
keyProviderUri_ = "";
policyId_ = 0;
}
// Memoized result of isInitialized(): -1 = not computed, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
// A message is initialized when all five required fields (1-5) are present.
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasBlockSize()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasBytesPerChecksum()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasWritePacketSize()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasReplication()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasFileBufferSize()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
// Serializes set fields in field-number order, then any unknown fields.
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
// Force size computation first so nested length prefixes are memoized.
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt64(1, blockSize_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt32(2, bytesPerChecksum_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt32(3, writePacketSize_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt32(4, replication_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeUInt32(5, fileBufferSize_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeBool(6, encryptDataTransfer_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeUInt64(7, trashInterval_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
output.writeEnum(8, checksumType_.getNumber());
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
output.writeBytes(9, getKeyProviderUriBytes());
}
if (((bitField0_ & 0x00000200) == 0x00000200)) {
output.writeUInt32(10, policyId_);
}
getUnknownFields().writeTo(output);
}
// Memoized wire size; -1 until first computed. Safe because the message is
// immutable after construction.
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(1, blockSize_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(2, bytesPerChecksum_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(3, writePacketSize_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(4, replication_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(5, fileBufferSize_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(6, encryptDataTransfer_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(7, trashInterval_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(8, checksumType_.getNumber());
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(9, getKeyProviderUriBytes());
}
if (((bitField0_ & 0x00000200) == 0x00000200)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(10, policyId_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
// Delegates Java serialization to the GeneratedMessage replacement object.
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
// Field-by-field equality: presence bits must match, and set fields must
// compare equal; unknown fields are compared too.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto) obj;
boolean result = true;
result = result && (hasBlockSize() == other.hasBlockSize());
if (hasBlockSize()) {
result = result && (getBlockSize()
== other.getBlockSize());
}
result = result && (hasBytesPerChecksum() == other.hasBytesPerChecksum());
if (hasBytesPerChecksum()) {
result = result && (getBytesPerChecksum()
== other.getBytesPerChecksum());
}
result = result && (hasWritePacketSize() == other.hasWritePacketSize());
if (hasWritePacketSize()) {
result = result && (getWritePacketSize()
== other.getWritePacketSize());
}
result = result && (hasReplication() == other.hasReplication());
if (hasReplication()) {
result = result && (getReplication()
== other.getReplication());
}
result = result && (hasFileBufferSize() == other.hasFileBufferSize());
if (hasFileBufferSize()) {
result = result && (getFileBufferSize()
== other.getFileBufferSize());
}
result = result && (hasEncryptDataTransfer() == other.hasEncryptDataTransfer());
if (hasEncryptDataTransfer()) {
result = result && (getEncryptDataTransfer()
== other.getEncryptDataTransfer());
}
result = result && (hasTrashInterval() == other.hasTrashInterval());
if (hasTrashInterval()) {
result = result && (getTrashInterval()
== other.getTrashInterval());
}
result = result && (hasChecksumType() == other.hasChecksumType());
if (hasChecksumType()) {
result = result &&
(getChecksumType() == other.getChecksumType());
}
result = result && (hasKeyProviderUri() == other.hasKeyProviderUri());
if (hasKeyProviderUri()) {
result = result && getKeyProviderUri()
.equals(other.getKeyProviderUri());
}
result = result && (hasPolicyId() == other.hasPolicyId());
if (hasPolicyId()) {
result = result && (getPolicyId()
== other.getPolicyId());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
// Memoized hash (0 = not yet computed). hashLong/hashBoolean/hashEnum are
// helpers inherited from the protobuf-generated message machinery; each set
// field mixes in its field number followed by its value.
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasBlockSize()) {
hash = (37 * hash) + BLOCKSIZE_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getBlockSize());
}
if (hasBytesPerChecksum()) {
hash = (37 * hash) + BYTESPERCHECKSUM_FIELD_NUMBER;
hash = (53 * hash) + getBytesPerChecksum();
}
if (hasWritePacketSize()) {
hash = (37 * hash) + WRITEPACKETSIZE_FIELD_NUMBER;
hash = (53 * hash) + getWritePacketSize();
}
if (hasReplication()) {
hash = (37 * hash) + REPLICATION_FIELD_NUMBER;
hash = (53 * hash) + getReplication();
}
if (hasFileBufferSize()) {
hash = (37 * hash) + FILEBUFFERSIZE_FIELD_NUMBER;
hash = (53 * hash) + getFileBufferSize();
}
if (hasEncryptDataTransfer()) {
hash = (37 * hash) + ENCRYPTDATATRANSFER_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getEncryptDataTransfer());
}
if (hasTrashInterval()) {
hash = (37 * hash) + TRASHINTERVAL_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getTrashInterval());
}
if (hasChecksumType()) {
hash = (37 * hash) + CHECKSUMTYPE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getChecksumType());
}
if (hasKeyProviderUri()) {
hash = (37 * hash) + KEYPROVIDERURI_FIELD_NUMBER;
hash = (53 * hash) + getKeyProviderUri().hashCode();
}
if (hasPolicyId()) {
hash = (37 * hash) + POLICYID_FIELD_NUMBER;
hash = (53 * hash) + getPolicyId();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// Static parse entry points: all delegate to PARSER, covering ByteString,
// byte[], InputStream (plain and length-delimited) and CodedInputStream
// sources, with and without an extension registry.
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Builder factory methods; toBuilder() seeds a new builder with this
// message's current field values.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.FsServerDefaultsProto}
*
*
**
* HDFS Server Defaults
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
// No-op here: this message has no sub-message fields that would need
// eagerly constructed field builders.
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
// Resets every field to its proto default and clears all presence bits.
public Builder clear() {
super.clear();
blockSize_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
bytesPerChecksum_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
writePacketSize_ = 0;
bitField0_ = (bitField0_ & ~0x00000004);
replication_ = 0;
bitField0_ = (bitField0_ & ~0x00000008);
fileBufferSize_ = 0;
bitField0_ = (bitField0_ & ~0x00000010);
encryptDataTransfer_ = false;
bitField0_ = (bitField0_ & ~0x00000020);
trashInterval_ = 0L;
bitField0_ = (bitField0_ & ~0x00000040);
checksumType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32;
bitField0_ = (bitField0_ & ~0x00000080);
keyProviderUri_ = "";
bitField0_ = (bitField0_ & ~0x00000100);
policyId_ = 0;
bitField0_ = (bitField0_ & ~0x00000200);
return this;
}
// Deep copy via an intermediate (possibly uninitialized) partial message.
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.getDefaultInstance();
}
// Like buildPartial(), but rejects messages missing required fields.
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto build() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
// Builds a message from the current builder state without checking required
// fields. Field values are always copied; the presence bitmap is rebuilt
// bit-by-bit from the builder's bitmap.
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.blockSize_ = blockSize_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.bytesPerChecksum_ = bytesPerChecksum_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.writePacketSize_ = writePacketSize_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.replication_ = replication_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.fileBufferSize_ = fileBufferSize_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000020;
}
result.encryptDataTransfer_ = encryptDataTransfer_;
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000040;
}
result.trashInterval_ = trashInterval_;
if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
to_bitField0_ |= 0x00000080;
}
result.checksumType_ = checksumType_;
if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
to_bitField0_ |= 0x00000100;
}
result.keyProviderUri_ = keyProviderUri_;
if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
to_bitField0_ |= 0x00000200;
}
result.policyId_ = policyId_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
// Dispatches to the typed merge when possible; otherwise falls back to the
// reflective merge in the superclass.
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
// Merges every field set on {@code other} into this builder; unset fields
// leave existing builder values untouched.
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.getDefaultInstance()) return this;
if (other.hasBlockSize()) {
setBlockSize(other.getBlockSize());
}
if (other.hasBytesPerChecksum()) {
setBytesPerChecksum(other.getBytesPerChecksum());
}
if (other.hasWritePacketSize()) {
setWritePacketSize(other.getWritePacketSize());
}
if (other.hasReplication()) {
setReplication(other.getReplication());
}
if (other.hasFileBufferSize()) {
setFileBufferSize(other.getFileBufferSize());
}
if (other.hasEncryptDataTransfer()) {
setEncryptDataTransfer(other.getEncryptDataTransfer());
}
if (other.hasTrashInterval()) {
setTrashInterval(other.getTrashInterval());
}
if (other.hasChecksumType()) {
setChecksumType(other.getChecksumType());
}
if (other.hasKeyProviderUri()) {
// Copy the backing Object directly (String or ByteString) to avoid
// forcing a UTF-8 decode; presence bit is set manually.
bitField0_ |= 0x00000100;
keyProviderUri_ = other.keyProviderUri_;
onChanged();
}
if (other.hasPolicyId()) {
setPolicyId(other.getPolicyId());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
// Initialized only when all five required fields (1-5) have been set.
public final boolean isInitialized() {
if (!hasBlockSize()) {
return false;
}
if (!hasBytesPerChecksum()) {
return false;
}
if (!hasWritePacketSize()) {
return false;
}
if (!hasReplication()) {
return false;
}
if (!hasFileBufferSize()) {
return false;
}
return true;
}
// Parses one message from the stream and merges it; partially parsed data
// is still merged (via finally) when a parse error is thrown.
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// Tracks which builder fields have been explicitly set: one bit per field
// in declaration order (0x01 blockSize, 0x02 bytesPerChecksum,
// 0x04 writePacketSize, 0x08 replication, 0x10 fileBufferSize, ...).
private int bitField0_;
// required uint64 blockSize = 1;
private long blockSize_ ;
/**
* required uint64 blockSize = 1;
*/
public boolean hasBlockSize() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint64 blockSize = 1;
*/
public long getBlockSize() {
return blockSize_;
}
/**
* required uint64 blockSize = 1;
*
* Sets the value and marks the field present (bit 0x01).
*/
public Builder setBlockSize(long value) {
bitField0_ |= 0x00000001;
blockSize_ = value;
onChanged();
return this;
}
/**
* required uint64 blockSize = 1;
*
* Clears the presence bit and restores the proto default (0).
*/
public Builder clearBlockSize() {
bitField0_ = (bitField0_ & ~0x00000001);
blockSize_ = 0L;
onChanged();
return this;
}
// required uint32 bytesPerChecksum = 2;
private int bytesPerChecksum_ ;
/**
* required uint32 bytesPerChecksum = 2;
*/
public boolean hasBytesPerChecksum() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint32 bytesPerChecksum = 2;
*/
public int getBytesPerChecksum() {
return bytesPerChecksum_;
}
/**
* required uint32 bytesPerChecksum = 2;
*/
public Builder setBytesPerChecksum(int value) {
bitField0_ |= 0x00000002;
bytesPerChecksum_ = value;
onChanged();
return this;
}
/**
* required uint32 bytesPerChecksum = 2;
*/
public Builder clearBytesPerChecksum() {
bitField0_ = (bitField0_ & ~0x00000002);
bytesPerChecksum_ = 0;
onChanged();
return this;
}
// required uint32 writePacketSize = 3;
// Presence tracked by bit 0x04 of bitField0_.
private int writePacketSize_ ;
/**
* required uint32 writePacketSize = 3;
*/
public boolean hasWritePacketSize() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint32 writePacketSize = 3;
*/
public int getWritePacketSize() {
return writePacketSize_;
}
/**
* required uint32 writePacketSize = 3;
*/
public Builder setWritePacketSize(int value) {
bitField0_ |= 0x00000004;
writePacketSize_ = value;
onChanged();
return this;
}
/**
* required uint32 writePacketSize = 3;
*/
public Builder clearWritePacketSize() {
bitField0_ = (bitField0_ & ~0x00000004);
writePacketSize_ = 0;
onChanged();
return this;
}
// required uint32 replication = 4;
// Presence tracked by bit 0x08 of bitField0_. Per the .proto comment the
// value is actually a short: only the low 16 bits are meaningful.
private int replication_ ;
/**
* required uint32 replication = 4;
*
*
* Actually a short - only 16 bits used
*
*/
public boolean hasReplication() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required uint32 replication = 4;
*
*
* Actually a short - only 16 bits used
*
*/
public int getReplication() {
return replication_;
}
/**
* required uint32 replication = 4;
*
*
* Actually a short - only 16 bits used
*
*/
public Builder setReplication(int value) {
bitField0_ |= 0x00000008;
replication_ = value;
onChanged();
return this;
}
/**
* required uint32 replication = 4;
*
*
* Actually a short - only 16 bits used
*
*/
public Builder clearReplication() {
bitField0_ = (bitField0_ & ~0x00000008);
replication_ = 0;
onChanged();
return this;
}
// required uint32 fileBufferSize = 5;
// Presence tracked by bit 0x10 of bitField0_.
private int fileBufferSize_ ;
/**
* required uint32 fileBufferSize = 5;
*/
public boolean hasFileBufferSize() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required uint32 fileBufferSize = 5;
*/
public int getFileBufferSize() {
return fileBufferSize_;
}
/**
* required uint32 fileBufferSize = 5;
*/
public Builder setFileBufferSize(int value) {
bitField0_ |= 0x00000010;
fileBufferSize_ = value;
onChanged();
return this;
}
/**
* required uint32 fileBufferSize = 5;
*/
public Builder clearFileBufferSize() {
bitField0_ = (bitField0_ & ~0x00000010);
fileBufferSize_ = 0;
onChanged();
return this;
}
// optional bool encryptDataTransfer = 6 [default = false];
// Presence tracked by bit 0x20 of bitField0_.
private boolean encryptDataTransfer_ ;
/**
* optional bool encryptDataTransfer = 6 [default = false];
*/
public boolean hasEncryptDataTransfer() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional bool encryptDataTransfer = 6 [default = false];
*/
public boolean getEncryptDataTransfer() {
return encryptDataTransfer_;
}
/**
* optional bool encryptDataTransfer = 6 [default = false];
*/
public Builder setEncryptDataTransfer(boolean value) {
bitField0_ |= 0x00000020;
encryptDataTransfer_ = value;
onChanged();
return this;
}
/**
* optional bool encryptDataTransfer = 6 [default = false];
*/
public Builder clearEncryptDataTransfer() {
bitField0_ = (bitField0_ & ~0x00000020);
encryptDataTransfer_ = false;
onChanged();
return this;
}
// optional uint64 trashInterval = 7 [default = 0];
// Presence tracked by bit 0x40 of bitField0_.
private long trashInterval_ ;
/**
* optional uint64 trashInterval = 7 [default = 0];
*/
public boolean hasTrashInterval() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional uint64 trashInterval = 7 [default = 0];
*/
public long getTrashInterval() {
return trashInterval_;
}
/**
* optional uint64 trashInterval = 7 [default = 0];
*/
public Builder setTrashInterval(long value) {
bitField0_ |= 0x00000040;
trashInterval_ = value;
onChanged();
return this;
}
/**
* optional uint64 trashInterval = 7 [default = 0];
*/
public Builder clearTrashInterval() {
bitField0_ = (bitField0_ & ~0x00000040);
trashInterval_ = 0L;
onChanged();
return this;
}
// optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
// Presence tracked by bit 0x80 of bitField0_; field defaults to CHECKSUM_CRC32.
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto checksumType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32;
/**
* optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
*/
public boolean hasChecksumType() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getChecksumType() {
return checksumType_;
}
/**
* optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
*
* @throws NullPointerException if value is null (enum fields reject null).
*/
public Builder setChecksumType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000080;
checksumType_ = value;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
*/
public Builder clearChecksumType() {
bitField0_ = (bitField0_ & ~0x00000080);
checksumType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32;
onChanged();
return this;
}
// optional string keyProviderUri = 9;
// Presence tracked by bit 0x100 of bitField0_. Stored as Object because the
// value may be held either as a String or as a UTF-8 ByteString; the getters
// below lazily convert and cache whichever representation is requested.
private java.lang.Object keyProviderUri_ = "";
/**
* optional string keyProviderUri = 9;
*/
public boolean hasKeyProviderUri() {
return ((bitField0_ & 0x00000100) == 0x00000100);
}
/**
* optional string keyProviderUri = 9;
*
* Returns the field as a String, decoding from UTF-8 bytes on first access
* and caching the decoded String back into the field.
*/
public java.lang.String getKeyProviderUri() {
java.lang.Object ref = keyProviderUri_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
keyProviderUri_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string keyProviderUri = 9;
*
* Returns the field as a ByteString, encoding to UTF-8 on first access and
* caching the bytes back into the field.
*/
public com.google.protobuf.ByteString
getKeyProviderUriBytes() {
java.lang.Object ref = keyProviderUri_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
keyProviderUri_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* optional string keyProviderUri = 9;
*
* @throws NullPointerException if value is null.
*/
public Builder setKeyProviderUri(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000100;
keyProviderUri_ = value;
onChanged();
return this;
}
/**
* optional string keyProviderUri = 9;
*
* Resets the field to the default instance's value (empty string).
*/
public Builder clearKeyProviderUri() {
bitField0_ = (bitField0_ & ~0x00000100);
keyProviderUri_ = getDefaultInstance().getKeyProviderUri();
onChanged();
return this;
}
/**
* optional string keyProviderUri = 9;
*
* Sets the field from raw UTF-8 bytes without validation.
*/
public Builder setKeyProviderUriBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000100;
keyProviderUri_ = value;
onChanged();
return this;
}
// optional uint32 policyId = 10 [default = 0];
// Presence tracked by bit 0x200 of bitField0_.
private int policyId_ ;
/**
* optional uint32 policyId = 10 [default = 0];
*/
public boolean hasPolicyId() {
return ((bitField0_ & 0x00000200) == 0x00000200);
}
/**
* optional uint32 policyId = 10 [default = 0];
*/
public int getPolicyId() {
return policyId_;
}
/**
* optional uint32 policyId = 10 [default = 0];
*/
public Builder setPolicyId(int value) {
bitField0_ |= 0x00000200;
policyId_ = value;
onChanged();
return this;
}
/**
* optional uint32 policyId = 10 [default = 0];
*/
public Builder clearPolicyId() {
bitField0_ = (bitField0_ & ~0x00000200);
policyId_ = 0;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.FsServerDefaultsProto)
}
// Eagerly builds the singleton default instance. The boolean "noInit"
// constructor skips field setup, so initFields() is called explicitly here.
static {
defaultInstance = new FsServerDefaultsProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.FsServerDefaultsProto)
}
public interface DirectoryListingProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
java.util.List
getPartialListingList();
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index);
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
int getPartialListingCount();
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
java.util.List extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>
getPartialListingOrBuilderList();
/**
* repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder(
int index);
// required uint32 remainingEntries = 2;
/**
* required uint32 remainingEntries = 2;
*/
boolean hasRemainingEntries();
/**
* required uint32 remainingEntries = 2;
*/
int getRemainingEntries();
}
/**
* Protobuf type {@code hadoop.hdfs.DirectoryListingProto}
*
*
**
* Directory listing
*
*/
public static final class DirectoryListingProto extends
com.google.protobuf.GeneratedMessage
implements DirectoryListingProtoOrBuilder {
// Use DirectoryListingProto.newBuilder() to construct.
// Fix: the wildcard type argument of the builder parameter was stripped by
// the HTML scrape ("GeneratedMessage.Builder>"); restored to Builder<?> to
// match the protoc-generated signature.
private DirectoryListingProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
  super(builder);
  this.unknownFields = builder.getUnknownFields();
}
// No-init constructor used only for the singleton default instance; field
// initialization is performed later via initFields() in the static block.
private DirectoryListingProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
// Singleton default instance, created in the class's static initializer.
private static final DirectoryListingProto defaultInstance;
public static DirectoryListingProto getDefaultInstance() {
return defaultInstance;
}
public DirectoryListingProto getDefaultInstanceForType() {
return defaultInstance;
}
// Fields that were present on the wire but unknown to this schema version;
// preserved so they survive a parse/serialize round trip.
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
/**
 * Stream-parsing constructor invoked by PARSER: decodes one
 * DirectoryListingProto directly from a CodedInputStream.
 *
 * Fix: the element type of the ArrayList was stripped by the HTML scrape
 * ("new java.util.ArrayList()"); the generic parameter is restored so the
 * assignment to partialListing_ type-checks.
 */
private DirectoryListingProto(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  initFields();
  int mutable_bitField0_ = 0;
  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
      com.google.protobuf.UnknownFieldSet.newBuilder();
  try {
    boolean done = false;
    while (!done) {
      int tag = input.readTag();
      switch (tag) {
        case 0:
          // End of stream.
          done = true;
          break;
        default: {
          // NOTE: protoc deliberately emits the default branch between the
          // explicit cases; unrecognized fields are preserved in
          // unknownFields, and an unparseable tag terminates the loop.
          if (!parseUnknownField(input, unknownFields,
              extensionRegistry, tag)) {
            done = true;
          }
          break;
        }
        case 10: {
          // Field 1, wire type 2: repeated HdfsFileStatusProto partialListing.
          // The list is created lazily on the first element.
          if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
            partialListing_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto>();
            mutable_bitField0_ |= 0x00000001;
          }
          partialListing_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.PARSER, extensionRegistry));
          break;
        }
        case 16: {
          // Field 2, wire type 0: required uint32 remainingEntries.
          bitField0_ |= 0x00000001;
          remainingEntries_ = input.readUInt32();
          break;
        }
      }
    }
  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
    throw e.setUnfinishedMessage(this);
  } catch (java.io.IOException e) {
    throw new com.google.protobuf.InvalidProtocolBufferException(
        e.getMessage()).setUnfinishedMessage(this);
  } finally {
    // Seal the repeated field and unknown-field set even on failure so the
    // unfinished message attached to the exception is immutable.
    if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
      partialListing_ = java.util.Collections.unmodifiableList(partialListing_);
    }
    this.unknownFields = unknownFields.build();
    makeExtensionsImmutable();
  }
}
// Reflection plumbing: descriptor and field-accessor table generated for
// hadoop.hdfs.DirectoryListingProto.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DirectoryListingProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DirectoryListingProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.Builder.class);
}
// Message parser singleton; delegates to the stream-parsing constructor.
// Fix: the type arguments of Parser and AbstractParser were stripped by the
// HTML scrape (raw "com.google.protobuf.Parser PARSER"); restored so the
// anonymous subclass and the @Override below compile.
public static com.google.protobuf.Parser<DirectoryListingProto> PARSER =
    new com.google.protobuf.AbstractParser<DirectoryListingProto>() {
  public DirectoryListingProto parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return new DirectoryListingProto(input, extensionRegistry);
  }
};
@java.lang.Override
public com.google.protobuf.Parser<DirectoryListingProto> getParserForType() {
  return PARSER;
}
// Presence bits for this message's singular fields (bit 0x01 = remainingEntries).
private int bitField0_;
// repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
public static final int PARTIALLISTING_FIELD_NUMBER = 1;
// Fix: the element type of the list was stripped by the HTML scrape
// (raw "java.util.List"); restored on both the field and the getter so the
// signatures match DirectoryListingProtoOrBuilder.
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto> partialListing_;
/**
 * repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;
 *
 * Returns the unmodifiable list of entries in this (possibly partial) listing.
 */
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto> getPartialListingList() {
  return partialListing_;
}
/**
*