// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: datatransfer.proto
package org.apache.hadoop.hdfs.protocol.proto;
public final class DataTransferProtos {
private DataTransferProtos() {}
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
}
/**
* Protobuf enum {@code hadoop.hdfs.Status}
*
*
* Status is a 4-bit enum
*
*/
public enum Status
implements com.google.protobuf.ProtocolMessageEnum {
/**
* SUCCESS = 0;
*/
SUCCESS(0, 0),
/**
* ERROR = 1;
*/
ERROR(1, 1),
/**
* ERROR_CHECKSUM = 2;
*/
ERROR_CHECKSUM(2, 2),
/**
* ERROR_INVALID = 3;
*/
ERROR_INVALID(3, 3),
/**
* ERROR_EXISTS = 4;
*/
ERROR_EXISTS(4, 4),
/**
* ERROR_ACCESS_TOKEN = 5;
*/
ERROR_ACCESS_TOKEN(5, 5),
/**
* CHECKSUM_OK = 6;
*/
CHECKSUM_OK(6, 6),
/**
* ERROR_UNSUPPORTED = 7;
*/
ERROR_UNSUPPORTED(7, 7),
/**
* OOB_RESTART = 8;
*
*
* Quick restart
*
*/
OOB_RESTART(8, 8),
/**
* OOB_RESERVED1 = 9;
*
*
* Reserved
*
*/
OOB_RESERVED1(9, 9),
/**
* OOB_RESERVED2 = 10;
*
*
* Reserved
*
*/
OOB_RESERVED2(10, 10),
/**
* OOB_RESERVED3 = 11;
*
*
* Reserved
*
*/
OOB_RESERVED3(11, 11),
/**
* IN_PROGRESS = 12;
*/
IN_PROGRESS(12, 12),
/**
* ERROR_BLOCK_PINNED = 13;
*/
ERROR_BLOCK_PINNED(13, 13),
;
/**
* SUCCESS = 0;
*/
public static final int SUCCESS_VALUE = 0;
/**
* ERROR = 1;
*/
public static final int ERROR_VALUE = 1;
/**
* ERROR_CHECKSUM = 2;
*/
public static final int ERROR_CHECKSUM_VALUE = 2;
/**
* ERROR_INVALID = 3;
*/
public static final int ERROR_INVALID_VALUE = 3;
/**
* ERROR_EXISTS = 4;
*/
public static final int ERROR_EXISTS_VALUE = 4;
/**
* ERROR_ACCESS_TOKEN = 5;
*/
public static final int ERROR_ACCESS_TOKEN_VALUE = 5;
/**
* CHECKSUM_OK = 6;
*/
public static final int CHECKSUM_OK_VALUE = 6;
/**
* ERROR_UNSUPPORTED = 7;
*/
public static final int ERROR_UNSUPPORTED_VALUE = 7;
/**
* OOB_RESTART = 8;
*
*
* Quick restart
*
*/
public static final int OOB_RESTART_VALUE = 8;
/**
* OOB_RESERVED1 = 9;
*
*
* Reserved
*
*/
public static final int OOB_RESERVED1_VALUE = 9;
/**
* OOB_RESERVED2 = 10;
*
*
* Reserved
*
*/
public static final int OOB_RESERVED2_VALUE = 10;
/**
* OOB_RESERVED3 = 11;
*
*
* Reserved
*
*/
public static final int OOB_RESERVED3_VALUE = 11;
/**
* IN_PROGRESS = 12;
*/
public static final int IN_PROGRESS_VALUE = 12;
/**
* ERROR_BLOCK_PINNED = 13;
*/
public static final int ERROR_BLOCK_PINNED_VALUE = 13;
public final int getNumber() { return value; }
public static Status valueOf(int value) {
switch (value) {
case 0: return SUCCESS;
case 1: return ERROR;
case 2: return ERROR_CHECKSUM;
case 3: return ERROR_INVALID;
case 4: return ERROR_EXISTS;
case 5: return ERROR_ACCESS_TOKEN;
case 6: return CHECKSUM_OK;
case 7: return ERROR_UNSUPPORTED;
case 8: return OOB_RESTART;
case 9: return OOB_RESERVED1;
case 10: return OOB_RESERVED2;
case 11: return OOB_RESERVED3;
case 12: return IN_PROGRESS;
case 13: return ERROR_BLOCK_PINNED;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<Status>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<Status>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<Status>() {
public Status findValueByNumber(int number) {
return Status.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.getDescriptor().getEnumTypes().get(0);
}
private static final Status[] VALUES = values();
public static Status valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private Status(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.Status)
}
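// Illustrative sketch (hand-added, not part of the generated file): decoding a
// Status read off the wire. valueOf(int) returns null for numbers this build
// does not know about, so callers should null-check instead of assuming every
// 4-bit value maps to a constant.
private static Status toStatusOrError(int wireValue) {
Status decoded = Status.valueOf(wireValue);
// null means the peer sent a newer/unknown code; fold it into ERROR.
return decoded == null ? Status.ERROR : decoded;
}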
/**
* Protobuf enum {@code hadoop.hdfs.ShortCircuitFdResponse}
*/
public enum ShortCircuitFdResponse
implements com.google.protobuf.ProtocolMessageEnum {
/**
* DO_NOT_USE_RECEIPT_VERIFICATION = 0;
*/
DO_NOT_USE_RECEIPT_VERIFICATION(0, 0),
/**
* USE_RECEIPT_VERIFICATION = 1;
*/
USE_RECEIPT_VERIFICATION(1, 1),
;
/**
* DO_NOT_USE_RECEIPT_VERIFICATION = 0;
*/
public static final int DO_NOT_USE_RECEIPT_VERIFICATION_VALUE = 0;
/**
* USE_RECEIPT_VERIFICATION = 1;
*/
public static final int USE_RECEIPT_VERIFICATION_VALUE = 1;
public final int getNumber() { return value; }
public static ShortCircuitFdResponse valueOf(int value) {
switch (value) {
case 0: return DO_NOT_USE_RECEIPT_VERIFICATION;
case 1: return USE_RECEIPT_VERIFICATION;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<ShortCircuitFdResponse>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<ShortCircuitFdResponse>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<ShortCircuitFdResponse>() {
public ShortCircuitFdResponse findValueByNumber(int number) {
return ShortCircuitFdResponse.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.getDescriptor().getEnumTypes().get(1);
}
private static final ShortCircuitFdResponse[] VALUES = values();
public static ShortCircuitFdResponse valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private ShortCircuitFdResponse(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.ShortCircuitFdResponse)
}
public interface DataTransferEncryptorMessageProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status = 1;
/**
* required .hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status = 1;
*/
boolean hasStatus();
/**
* required .hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus getStatus();
// optional bytes payload = 2;
/**
* optional bytes payload = 2;
*/
boolean hasPayload();
/**
* optional bytes payload = 2;
*/
com.google.protobuf.ByteString getPayload();
// optional string message = 3;
/**
* optional string message = 3;
*/
boolean hasMessage();
/**
* optional string message = 3;
*/
java.lang.String getMessage();
/**
* optional string message = 3;
*/
com.google.protobuf.ByteString
getMessageBytes();
// repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto>
getCipherOptionList();
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto getCipherOption(int index);
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
int getCipherOptionCount();
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder>
getCipherOptionOrBuilderList();
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder getCipherOptionOrBuilder(
int index);
}
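// Illustrative sketch (hand-added, not part of the generated file): both the
// immutable DataTransferEncryptorMessageProto below and its Builder implement
// this OrBuilder interface, so read-only helpers can accept either one without
// forcing an intermediate build().
private static boolean handshakeFailed(
DataTransferEncryptorMessageProtoOrBuilder msgOrBuilder) {
return msgOrBuilder.hasStatus()
&& msgOrBuilder.getStatus()
!= DataTransferEncryptorMessageProto.DataTransferEncryptorStatus.SUCCESS;
}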
/**
* Protobuf type {@code hadoop.hdfs.DataTransferEncryptorMessageProto}
*/
public static final class DataTransferEncryptorMessageProto extends
com.google.protobuf.GeneratedMessage
implements DataTransferEncryptorMessageProtoOrBuilder {
// Use DataTransferEncryptorMessageProto.newBuilder() to construct.
private DataTransferEncryptorMessageProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private DataTransferEncryptorMessageProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final DataTransferEncryptorMessageProto defaultInstance;
public static DataTransferEncryptorMessageProto getDefaultInstance() {
return defaultInstance;
}
public DataTransferEncryptorMessageProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DataTransferEncryptorMessageProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
status_ = value;
}
break;
}
case 18: {
bitField0_ |= 0x00000002;
payload_ = input.readBytes();
break;
}
case 26: {
bitField0_ |= 0x00000004;
message_ = input.readBytes();
break;
}
case 34: {
if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
cipherOption_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto>();
mutable_bitField0_ |= 0x00000008;
}
cipherOption_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.PARSER, extensionRegistry));
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
cipherOption_ = java.util.Collections.unmodifiableList(cipherOption_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.Builder.class);
}
public static com.google.protobuf.Parser<DataTransferEncryptorMessageProto> PARSER =
new com.google.protobuf.AbstractParser<DataTransferEncryptorMessageProto>() {
public DataTransferEncryptorMessageProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new DataTransferEncryptorMessageProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<DataTransferEncryptorMessageProto> getParserForType() {
return PARSER;
}
/**
* Protobuf enum {@code hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus}
*/
public enum DataTransferEncryptorStatus
implements com.google.protobuf.ProtocolMessageEnum {
/**
* SUCCESS = 0;
*/
SUCCESS(0, 0),
/**
* ERROR_UNKNOWN_KEY = 1;
*/
ERROR_UNKNOWN_KEY(1, 1),
/**
* ERROR = 2;
*/
ERROR(2, 2),
;
/**
* SUCCESS = 0;
*/
public static final int SUCCESS_VALUE = 0;
/**
* ERROR_UNKNOWN_KEY = 1;
*/
public static final int ERROR_UNKNOWN_KEY_VALUE = 1;
/**
* ERROR = 2;
*/
public static final int ERROR_VALUE = 2;
public final int getNumber() { return value; }
public static DataTransferEncryptorStatus valueOf(int value) {
switch (value) {
case 0: return SUCCESS;
case 1: return ERROR_UNKNOWN_KEY;
case 2: return ERROR;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<DataTransferEncryptorStatus>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<DataTransferEncryptorStatus>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<DataTransferEncryptorStatus>() {
public DataTransferEncryptorStatus findValueByNumber(int number) {
return DataTransferEncryptorStatus.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.getDescriptor().getEnumTypes().get(0);
}
private static final DataTransferEncryptorStatus[] VALUES = values();
public static DataTransferEncryptorStatus valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private DataTransferEncryptorStatus(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus)
}
private int bitField0_;
// required .hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status = 1;
public static final int STATUS_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status_;
/**
* required .hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status = 1;
*/
public boolean hasStatus() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus getStatus() {
return status_;
}
// optional bytes payload = 2;
public static final int PAYLOAD_FIELD_NUMBER = 2;
private com.google.protobuf.ByteString payload_;
/**
* optional bytes payload = 2;
*/
public boolean hasPayload() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional bytes payload = 2;
*/
public com.google.protobuf.ByteString getPayload() {
return payload_;
}
// optional string message = 3;
public static final int MESSAGE_FIELD_NUMBER = 3;
private java.lang.Object message_;
/**
* optional string message = 3;
*/
public boolean hasMessage() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional string message = 3;
*/
public java.lang.String getMessage() {
java.lang.Object ref = message_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
message_ = s;
}
return s;
}
}
/**
* optional string message = 3;
*/
public com.google.protobuf.ByteString
getMessageBytes() {
java.lang.Object ref = message_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
message_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
public static final int CIPHEROPTION_FIELD_NUMBER = 4;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto> cipherOption_;
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto> getCipherOptionList() {
return cipherOption_;
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder>
getCipherOptionOrBuilderList() {
return cipherOption_;
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public int getCipherOptionCount() {
return cipherOption_.size();
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto getCipherOption(int index) {
return cipherOption_.get(index);
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder getCipherOptionOrBuilder(
int index) {
return cipherOption_.get(index);
}
private void initFields() {
status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus.SUCCESS;
payload_ = com.google.protobuf.ByteString.EMPTY;
message_ = "";
cipherOption_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasStatus()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getCipherOptionCount(); i++) {
if (!getCipherOption(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, status_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, payload_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, getMessageBytes());
}
for (int i = 0; i < cipherOption_.size(); i++) {
output.writeMessage(4, cipherOption_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, status_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, payload_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, getMessageBytes());
}
for (int i = 0; i < cipherOption_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(4, cipherOption_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto) obj;
boolean result = true;
result = result && (hasStatus() == other.hasStatus());
if (hasStatus()) {
result = result &&
(getStatus() == other.getStatus());
}
result = result && (hasPayload() == other.hasPayload());
if (hasPayload()) {
result = result && getPayload()
.equals(other.getPayload());
}
result = result && (hasMessage() == other.hasMessage());
if (hasMessage()) {
result = result && getMessage()
.equals(other.getMessage());
}
result = result && getCipherOptionList()
.equals(other.getCipherOptionList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasStatus()) {
hash = (37 * hash) + STATUS_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getStatus());
}
if (hasPayload()) {
hash = (37 * hash) + PAYLOAD_FIELD_NUMBER;
hash = (53 * hash) + getPayload().hashCode();
}
if (hasMessage()) {
hash = (37 * hash) + MESSAGE_FIELD_NUMBER;
hash = (53 * hash) + getMessage().hashCode();
}
if (getCipherOptionCount() > 0) {
hash = (37 * hash) + CIPHEROPTION_FIELD_NUMBER;
hash = (53 * hash) + getCipherOptionList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.DataTransferEncryptorMessageProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getCipherOptionFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus.SUCCESS;
bitField0_ = (bitField0_ & ~0x00000001);
payload_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000002);
message_ = "";
bitField0_ = (bitField0_ & ~0x00000004);
if (cipherOptionBuilder_ == null) {
cipherOption_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000008);
} else {
cipherOptionBuilder_.clear();
}
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.status_ = status_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.payload_ = payload_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.message_ = message_;
if (cipherOptionBuilder_ == null) {
if (((bitField0_ & 0x00000008) == 0x00000008)) {
cipherOption_ = java.util.Collections.unmodifiableList(cipherOption_);
bitField0_ = (bitField0_ & ~0x00000008);
}
result.cipherOption_ = cipherOption_;
} else {
result.cipherOption_ = cipherOptionBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.getDefaultInstance()) return this;
if (other.hasStatus()) {
setStatus(other.getStatus());
}
if (other.hasPayload()) {
setPayload(other.getPayload());
}
if (other.hasMessage()) {
bitField0_ |= 0x00000004;
message_ = other.message_;
onChanged();
}
if (cipherOptionBuilder_ == null) {
if (!other.cipherOption_.isEmpty()) {
if (cipherOption_.isEmpty()) {
cipherOption_ = other.cipherOption_;
bitField0_ = (bitField0_ & ~0x00000008);
} else {
ensureCipherOptionIsMutable();
cipherOption_.addAll(other.cipherOption_);
}
onChanged();
}
} else {
if (!other.cipherOption_.isEmpty()) {
if (cipherOptionBuilder_.isEmpty()) {
cipherOptionBuilder_.dispose();
cipherOptionBuilder_ = null;
cipherOption_ = other.cipherOption_;
bitField0_ = (bitField0_ & ~0x00000008);
cipherOptionBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getCipherOptionFieldBuilder() : null;
} else {
cipherOptionBuilder_.addAllMessages(other.cipherOption_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasStatus()) {
return false;
}
for (int i = 0; i < getCipherOptionCount(); i++) {
if (!getCipherOption(i).isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus.SUCCESS;
/**
* required .hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status = 1;
*/
public boolean hasStatus() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus getStatus() {
return status_;
}
/**
* required .hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status = 1;
*/
public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
status_ = value;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status = 1;
*/
public Builder clearStatus() {
bitField0_ = (bitField0_ & ~0x00000001);
status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus.SUCCESS;
onChanged();
return this;
}
// optional bytes payload = 2;
private com.google.protobuf.ByteString payload_ = com.google.protobuf.ByteString.EMPTY;
/**
* optional bytes payload = 2;
*/
public boolean hasPayload() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional bytes payload = 2;
*/
public com.google.protobuf.ByteString getPayload() {
return payload_;
}
/**
* optional bytes payload = 2;
*/
public Builder setPayload(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
payload_ = value;
onChanged();
return this;
}
/**
* optional bytes payload = 2;
*/
public Builder clearPayload() {
bitField0_ = (bitField0_ & ~0x00000002);
payload_ = getDefaultInstance().getPayload();
onChanged();
return this;
}
// optional string message = 3;
private java.lang.Object message_ = "";
/**
* optional string message = 3;
*/
public boolean hasMessage() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional string message = 3;
*/
public java.lang.String getMessage() {
java.lang.Object ref = message_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
message_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string message = 3;
*/
public com.google.protobuf.ByteString
getMessageBytes() {
java.lang.Object ref = message_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
message_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* optional string message = 3;
*/
public Builder setMessage(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
message_ = value;
onChanged();
return this;
}
/**
* optional string message = 3;
*/
public Builder clearMessage() {
bitField0_ = (bitField0_ & ~0x00000004);
message_ = getDefaultInstance().getMessage();
onChanged();
return this;
}
/**
* optional string message = 3;
*/
public Builder setMessageBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
message_ = value;
onChanged();
return this;
}
// repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto> cipherOption_ =
java.util.Collections.emptyList();
private void ensureCipherOptionIsMutable() {
if (!((bitField0_ & 0x00000008) == 0x00000008)) {
cipherOption_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto>(cipherOption_);
bitField0_ |= 0x00000008;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder> cipherOptionBuilder_;
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto> getCipherOptionList() {
if (cipherOptionBuilder_ == null) {
return java.util.Collections.unmodifiableList(cipherOption_);
} else {
return cipherOptionBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public int getCipherOptionCount() {
if (cipherOptionBuilder_ == null) {
return cipherOption_.size();
} else {
return cipherOptionBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto getCipherOption(int index) {
if (cipherOptionBuilder_ == null) {
return cipherOption_.get(index);
} else {
return cipherOptionBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public Builder setCipherOption(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto value) {
if (cipherOptionBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureCipherOptionIsMutable();
cipherOption_.set(index, value);
onChanged();
} else {
cipherOptionBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public Builder setCipherOption(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder builderForValue) {
if (cipherOptionBuilder_ == null) {
ensureCipherOptionIsMutable();
cipherOption_.set(index, builderForValue.build());
onChanged();
} else {
cipherOptionBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public Builder addCipherOption(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto value) {
if (cipherOptionBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureCipherOptionIsMutable();
cipherOption_.add(value);
onChanged();
} else {
cipherOptionBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public Builder addCipherOption(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto value) {
if (cipherOptionBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureCipherOptionIsMutable();
cipherOption_.add(index, value);
onChanged();
} else {
cipherOptionBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public Builder addCipherOption(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder builderForValue) {
if (cipherOptionBuilder_ == null) {
ensureCipherOptionIsMutable();
cipherOption_.add(builderForValue.build());
onChanged();
} else {
cipherOptionBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public Builder addCipherOption(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder builderForValue) {
if (cipherOptionBuilder_ == null) {
ensureCipherOptionIsMutable();
cipherOption_.add(index, builderForValue.build());
onChanged();
} else {
cipherOptionBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public Builder addAllCipherOption(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto> values) {
if (cipherOptionBuilder_ == null) {
ensureCipherOptionIsMutable();
super.addAll(values, cipherOption_);
onChanged();
} else {
cipherOptionBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public Builder clearCipherOption() {
if (cipherOptionBuilder_ == null) {
cipherOption_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000008);
onChanged();
} else {
cipherOptionBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public Builder removeCipherOption(int index) {
if (cipherOptionBuilder_ == null) {
ensureCipherOptionIsMutable();
cipherOption_.remove(index);
onChanged();
} else {
cipherOptionBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder getCipherOptionBuilder(
int index) {
return getCipherOptionFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder getCipherOptionOrBuilder(
int index) {
if (cipherOptionBuilder_ == null) {
return cipherOption_.get(index); } else {
return cipherOptionBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder>
getCipherOptionOrBuilderList() {
if (cipherOptionBuilder_ != null) {
return cipherOptionBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(cipherOption_);
}
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder addCipherOptionBuilder() {
return getCipherOptionFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder addCipherOptionBuilder(
int index) {
return getCipherOptionFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder>
getCipherOptionBuilderList() {
return getCipherOptionFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder>
getCipherOptionFieldBuilder() {
if (cipherOptionBuilder_ == null) {
cipherOptionBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder>(
cipherOption_,
((bitField0_ & 0x00000008) == 0x00000008),
getParentForChildren(),
isClean());
cipherOption_ = null;
}
return cipherOptionBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DataTransferEncryptorMessageProto)
}
static {
defaultInstance = new DataTransferEncryptorMessageProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DataTransferEncryptorMessageProto)
}
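// Illustrative sketch (hand-added, not part of the generated file): building a
// handshake message and round-tripping it through bytes. build() throws if the
// required status field is unset; parseFrom(byte[]) is the generated entry
// point shown above.
private static DataTransferEncryptorMessageProto encryptorRoundTripExample()
throws com.google.protobuf.InvalidProtocolBufferException {
DataTransferEncryptorMessageProto msg =
DataTransferEncryptorMessageProto.newBuilder()
.setStatus(DataTransferEncryptorMessageProto.DataTransferEncryptorStatus.SUCCESS)
.setMessage("handshake ok")
.build();
return DataTransferEncryptorMessageProto.parseFrom(msg.toByteArray());
}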
public interface BaseHeaderProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.ExtendedBlockProto block = 1;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
boolean hasBlock();
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock();
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder();
// optional .hadoop.common.TokenProto token = 2;
/**
* optional .hadoop.common.TokenProto token = 2;
*/
boolean hasToken();
/**
* optional .hadoop.common.TokenProto token = 2;
*/
org.apache.hadoop.security.proto.SecurityProtos.TokenProto getToken();
/**
* optional .hadoop.common.TokenProto token = 2;
*/
org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getTokenOrBuilder();
// optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
boolean hasTraceInfo();
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getTraceInfo();
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder getTraceInfoOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.BaseHeaderProto}
*/
public static final class BaseHeaderProto extends
com.google.protobuf.GeneratedMessage
implements BaseHeaderProtoOrBuilder {
// Use BaseHeaderProto.newBuilder() to construct.
private BaseHeaderProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private BaseHeaderProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final BaseHeaderProto defaultInstance;
public static BaseHeaderProto getDefaultInstance() {
return defaultInstance;
}
public BaseHeaderProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private BaseHeaderProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = block_.toBuilder();
}
block_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(block_);
block_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
subBuilder = token_.toBuilder();
}
token_ = input.readMessage(org.apache.hadoop.security.proto.SecurityProtos.TokenProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(token_);
token_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
case 26: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) == 0x00000004)) {
subBuilder = traceInfo_.toBuilder();
}
traceInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(traceInfo_);
traceInfo_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BaseHeaderProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BaseHeaderProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder.class);
}
public static com.google.protobuf.Parser<BaseHeaderProto> PARSER =
new com.google.protobuf.AbstractParser<BaseHeaderProto>() {
public BaseHeaderProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new BaseHeaderProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<BaseHeaderProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.ExtendedBlockProto block = 1;
public static final int BLOCK_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
return block_;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
return block_;
}
// optional .hadoop.common.TokenProto token = 2;
public static final int TOKEN_FIELD_NUMBER = 2;
private org.apache.hadoop.security.proto.SecurityProtos.TokenProto token_;
/**
* optional .hadoop.common.TokenProto token = 2;
*/
public boolean hasToken() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional .hadoop.common.TokenProto token = 2;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getToken() {
return token_;
}
/**
* optional .hadoop.common.TokenProto token = 2;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getTokenOrBuilder() {
return token_;
}
// optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
public static final int TRACEINFO_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto traceInfo_;
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
public boolean hasTraceInfo() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getTraceInfo() {
return traceInfo_;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder getTraceInfoOrBuilder() {
return traceInfo_;
}
private void initFields() {
block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
token_ = org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance();
traceInfo_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasBlock()) {
memoizedIsInitialized = 0;
return false;
}
if (!getBlock().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
if (hasToken()) {
if (!getToken().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasTraceInfo()) {
if (!getTraceInfo().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, block_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeMessage(2, token_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeMessage(3, traceInfo_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, block_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, token_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, traceInfo_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto) obj;
boolean result = true;
result = result && (hasBlock() == other.hasBlock());
if (hasBlock()) {
result = result && getBlock()
.equals(other.getBlock());
}
result = result && (hasToken() == other.hasToken());
if (hasToken()) {
result = result && getToken()
.equals(other.getToken());
}
result = result && (hasTraceInfo() == other.hasTraceInfo());
if (hasTraceInfo()) {
result = result && getTraceInfo()
.equals(other.getTraceInfo());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasBlock()) {
hash = (37 * hash) + BLOCK_FIELD_NUMBER;
hash = (53 * hash) + getBlock().hashCode();
}
if (hasToken()) {
hash = (37 * hash) + TOKEN_FIELD_NUMBER;
hash = (53 * hash) + getToken().hashCode();
}
if (hasTraceInfo()) {
hash = (37 * hash) + TRACEINFO_FIELD_NUMBER;
hash = (53 * hash) + getTraceInfo().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.BaseHeaderProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BaseHeaderProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BaseHeaderProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getBlockFieldBuilder();
getTokenFieldBuilder();
getTraceInfoFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (blockBuilder_ == null) {
block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
if (tokenBuilder_ == null) {
token_ = org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance();
} else {
tokenBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
if (traceInfoBuilder_ == null) {
traceInfo_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance();
} else {
traceInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BaseHeaderProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (blockBuilder_ == null) {
result.block_ = block_;
} else {
result.block_ = blockBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
if (tokenBuilder_ == null) {
result.token_ = token_;
} else {
result.token_ = tokenBuilder_.build();
}
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
if (traceInfoBuilder_ == null) {
result.traceInfo_ = traceInfo_;
} else {
result.traceInfo_ = traceInfoBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) return this;
if (other.hasBlock()) {
mergeBlock(other.getBlock());
}
if (other.hasToken()) {
mergeToken(other.getToken());
}
if (other.hasTraceInfo()) {
mergeTraceInfo(other.getTraceInfo());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasBlock()) {
return false;
}
if (!getBlock().isInitialized()) {
return false;
}
if (hasToken()) {
if (!getToken().isInitialized()) {
return false;
}
}
if (hasTraceInfo()) {
if (!getTraceInfo().isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.ExtendedBlockProto block = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
if (blockBuilder_ == null) {
return block_;
} else {
return blockBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
if (blockBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
block_ = value;
onChanged();
} else {
blockBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder setBlock(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) {
if (blockBuilder_ == null) {
block_ = builderForValue.build();
onChanged();
} else {
blockBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
if (blockBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) {
block_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
} else {
block_ = value;
}
onChanged();
} else {
blockBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder clearBlock() {
if (blockBuilder_ == null) {
block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
onChanged();
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getBlockFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
if (blockBuilder_ != null) {
return blockBuilder_.getMessageOrBuilder();
} else {
return block_;
}
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>
getBlockFieldBuilder() {
if (blockBuilder_ == null) {
blockBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>(
block_,
getParentForChildren(),
isClean());
block_ = null;
}
return blockBuilder_;
}
// optional .hadoop.common.TokenProto token = 2;
private org.apache.hadoop.security.proto.SecurityProtos.TokenProto token_ = org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> tokenBuilder_;
/**
* optional .hadoop.common.TokenProto token = 2;
*/
public boolean hasToken() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional .hadoop.common.TokenProto token = 2;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getToken() {
if (tokenBuilder_ == null) {
return token_;
} else {
return tokenBuilder_.getMessage();
}
}
/**
* optional .hadoop.common.TokenProto token = 2;
*/
public Builder setToken(org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
if (tokenBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
token_ = value;
onChanged();
} else {
tokenBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* optional .hadoop.common.TokenProto token = 2;
*/
public Builder setToken(
org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) {
if (tokenBuilder_ == null) {
token_ = builderForValue.build();
onChanged();
} else {
tokenBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
return this;
}
/**
* optional .hadoop.common.TokenProto token = 2;
*/
public Builder mergeToken(org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
if (tokenBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002) &&
token_ != org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance()) {
token_ =
org.apache.hadoop.security.proto.SecurityProtos.TokenProto.newBuilder(token_).mergeFrom(value).buildPartial();
} else {
token_ = value;
}
onChanged();
} else {
tokenBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* optional .hadoop.common.TokenProto token = 2;
*/
public Builder clearToken() {
if (tokenBuilder_ == null) {
token_ = org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance();
onChanged();
} else {
tokenBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
/**
* optional .hadoop.common.TokenProto token = 2;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder getTokenBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getTokenFieldBuilder().getBuilder();
}
/**
* optional .hadoop.common.TokenProto token = 2;
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getTokenOrBuilder() {
if (tokenBuilder_ != null) {
return tokenBuilder_.getMessageOrBuilder();
} else {
return token_;
}
}
/**
* optional .hadoop.common.TokenProto token = 2;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>
getTokenFieldBuilder() {
if (tokenBuilder_ == null) {
tokenBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>(
token_,
getParentForChildren(),
isClean());
token_ = null;
}
return tokenBuilder_;
}
// optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto traceInfo_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder> traceInfoBuilder_;
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
public boolean hasTraceInfo() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getTraceInfo() {
if (traceInfoBuilder_ == null) {
return traceInfo_;
} else {
return traceInfoBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
public Builder setTraceInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto value) {
if (traceInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
traceInfo_ = value;
onChanged();
} else {
traceInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
public Builder setTraceInfo(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder builderForValue) {
if (traceInfoBuilder_ == null) {
traceInfo_ = builderForValue.build();
onChanged();
} else {
traceInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
public Builder mergeTraceInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto value) {
if (traceInfoBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004) &&
traceInfo_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance()) {
traceInfo_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.newBuilder(traceInfo_).mergeFrom(value).buildPartial();
} else {
traceInfo_ = value;
}
onChanged();
} else {
traceInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
public Builder clearTraceInfo() {
if (traceInfoBuilder_ == null) {
traceInfo_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance();
onChanged();
} else {
traceInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder getTraceInfoBuilder() {
bitField0_ |= 0x00000004;
onChanged();
return getTraceInfoFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder getTraceInfoOrBuilder() {
if (traceInfoBuilder_ != null) {
return traceInfoBuilder_.getMessageOrBuilder();
} else {
return traceInfo_;
}
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder>
getTraceInfoFieldBuilder() {
if (traceInfoBuilder_ == null) {
traceInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder>(
traceInfo_,
getParentForChildren(),
isClean());
traceInfo_ = null;
}
return traceInfoBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.BaseHeaderProto)
}
static {
defaultInstance = new BaseHeaderProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.BaseHeaderProto)
}
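// A hand-built BaseHeaderProto, as a usage sketch. The ExtendedBlockProto
// setters (setPoolId, setBlockId, setGenerationStamp) are assumed from the
// usual hdfs.proto field layout, and every literal value below is a
// placeholder rather than data from a real cluster.
//
//   HdfsProtos.ExtendedBlockProto block =
//       HdfsProtos.ExtendedBlockProto.newBuilder()
//           .setPoolId("BP-example")       // placeholder block pool id
//           .setBlockId(1073741825L)       // placeholder block id
//           .setGenerationStamp(1001L)     // placeholder generation stamp
//           .build();
//   BaseHeaderProto header = BaseHeaderProto.newBuilder()
//       .setBlock(block)   // required; build() throws
//                          // UninitializedMessageException when it is unset
//       .build();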
public interface DataTransferTraceInfoProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required uint64 traceId = 1;
/**
* required uint64 traceId = 1;
*/
boolean hasTraceId();
/**
* required uint64 traceId = 1;
*/
long getTraceId();
// required uint64 parentId = 2;
/**
* required uint64 parentId = 2;
*/
boolean hasParentId();
/**
* required uint64 parentId = 2;
*/
long getParentId();
}
/**
* Protobuf type {@code hadoop.hdfs.DataTransferTraceInfoProto}
*/
public static final class DataTransferTraceInfoProto extends
com.google.protobuf.GeneratedMessage
implements DataTransferTraceInfoProtoOrBuilder {
// Use DataTransferTraceInfoProto.newBuilder() to construct.
private DataTransferTraceInfoProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private DataTransferTraceInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final DataTransferTraceInfoProto defaultInstance;
public static DataTransferTraceInfoProto getDefaultInstance() {
return defaultInstance;
}
public DataTransferTraceInfoProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DataTransferTraceInfoProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
traceId_ = input.readUInt64();
break;
}
case 16: {
bitField0_ |= 0x00000002;
parentId_ = input.readUInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferTraceInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferTraceInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder.class);
}
public static com.google.protobuf.Parser<DataTransferTraceInfoProto> PARSER =
new com.google.protobuf.AbstractParser<DataTransferTraceInfoProto>() {
public DataTransferTraceInfoProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new DataTransferTraceInfoProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<DataTransferTraceInfoProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required uint64 traceId = 1;
public static final int TRACEID_FIELD_NUMBER = 1;
private long traceId_;
/**
* required uint64 traceId = 1;
*/
public boolean hasTraceId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint64 traceId = 1;
*/
public long getTraceId() {
return traceId_;
}
// required uint64 parentId = 2;
public static final int PARENTID_FIELD_NUMBER = 2;
private long parentId_;
/**
* required uint64 parentId = 2;
*/
public boolean hasParentId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 parentId = 2;
*/
public long getParentId() {
return parentId_;
}
private void initFields() {
traceId_ = 0L;
parentId_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasTraceId()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasParentId()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt64(1, traceId_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, parentId_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(1, traceId_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, parentId_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto) obj;
boolean result = true;
result = result && (hasTraceId() == other.hasTraceId());
if (hasTraceId()) {
result = result && (getTraceId()
== other.getTraceId());
}
result = result && (hasParentId() == other.hasParentId());
if (hasParentId()) {
result = result && (getParentId()
== other.getParentId());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasTraceId()) {
hash = (37 * hash) + TRACEID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getTraceId());
}
if (hasParentId()) {
hash = (37 * hash) + PARENTID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getParentId());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.DataTransferTraceInfoProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferTraceInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferTraceInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
traceId_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
parentId_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferTraceInfoProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.traceId_ = traceId_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.parentId_ = parentId_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance()) return this;
if (other.hasTraceId()) {
setTraceId(other.getTraceId());
}
if (other.hasParentId()) {
setParentId(other.getParentId());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasTraceId()) {
return false;
}
if (!hasParentId()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required uint64 traceId = 1;
private long traceId_ ;
/**
* required uint64 traceId = 1;
*/
public boolean hasTraceId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint64 traceId = 1;
*/
public long getTraceId() {
return traceId_;
}
/**
* required uint64 traceId = 1;
*/
public Builder setTraceId(long value) {
bitField0_ |= 0x00000001;
traceId_ = value;
onChanged();
return this;
}
/**
* required uint64 traceId = 1;
*/
public Builder clearTraceId() {
bitField0_ = (bitField0_ & ~0x00000001);
traceId_ = 0L;
onChanged();
return this;
}
// required uint64 parentId = 2;
private long parentId_ ;
/**
* required uint64 parentId = 2;
*/
public boolean hasParentId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 parentId = 2;
*/
public long getParentId() {
return parentId_;
}
/**
* required uint64 parentId = 2;
*/
public Builder setParentId(long value) {
bitField0_ |= 0x00000002;
parentId_ = value;
onChanged();
return this;
}
/**
* required uint64 parentId = 2;
*/
public Builder clearParentId() {
bitField0_ = (bitField0_ & ~0x00000002);
parentId_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DataTransferTraceInfoProto)
}
static {
defaultInstance = new DataTransferTraceInfoProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DataTransferTraceInfoProto)
}
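// A round-trip sketch for DataTransferTraceInfoProto, using only methods
// defined above (newBuilder, the two required setters, parseFrom). The ids
// are arbitrary placeholders; both fields are required, so omitting either
// setter would make build() throw.
//
//   DataTransferTraceInfoProto trace = DataTransferTraceInfoProto.newBuilder()
//       .setTraceId(42L)                  // placeholder span id
//       .setParentId(7L)                  // placeholder parent span id
//       .build();
//   byte[] wire = trace.toByteArray();    // writeTo + getSerializedSize
//   DataTransferTraceInfoProto parsed =
//       DataTransferTraceInfoProto.parseFrom(wire);
//   // equals/hashCode are value-based and memoized, so this holds:
//   assert parsed.equals(trace) && parsed.hashCode() == trace.hashCode();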
public interface ClientOperationHeaderProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
boolean hasBaseHeader();
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getBaseHeader();
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getBaseHeaderOrBuilder();
// required string clientName = 2;
/**
* required string clientName = 2;
*/
boolean hasClientName();
/**
* required string clientName = 2;
*/
java.lang.String getClientName();
/**
* required string clientName = 2;
*/
com.google.protobuf.ByteString
getClientNameBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.ClientOperationHeaderProto}
*/
public static final class ClientOperationHeaderProto extends
com.google.protobuf.GeneratedMessage
implements ClientOperationHeaderProtoOrBuilder {
// Use ClientOperationHeaderProto.newBuilder() to construct.
private ClientOperationHeaderProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ClientOperationHeaderProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ClientOperationHeaderProto defaultInstance;
public static ClientOperationHeaderProto getDefaultInstance() {
return defaultInstance;
}
public ClientOperationHeaderProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ClientOperationHeaderProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = baseHeader_.toBuilder();
}
baseHeader_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(baseHeader_);
baseHeader_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
bitField0_ |= 0x00000002;
clientName_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientOperationHeaderProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientOperationHeaderProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder.class);
}
public static com.google.protobuf.Parser<ClientOperationHeaderProto> PARSER =
new com.google.protobuf.AbstractParser<ClientOperationHeaderProto>() {
public ClientOperationHeaderProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ClientOperationHeaderProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<ClientOperationHeaderProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
public static final int BASEHEADER_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto baseHeader_;
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
public boolean hasBaseHeader() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getBaseHeader() {
return baseHeader_;
}
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getBaseHeaderOrBuilder() {
return baseHeader_;
}
// required string clientName = 2;
public static final int CLIENTNAME_FIELD_NUMBER = 2;
private java.lang.Object clientName_;
/**
* required string clientName = 2;
*/
public boolean hasClientName() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string clientName = 2;
*/
public java.lang.String getClientName() {
java.lang.Object ref = clientName_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
clientName_ = s;
}
return s;
}
}
/**
* required string clientName = 2;
*/
public com.google.protobuf.ByteString
getClientNameBytes() {
java.lang.Object ref = clientName_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
clientName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
baseHeader_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
clientName_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasBaseHeader()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasClientName()) {
memoizedIsInitialized = 0;
return false;
}
if (!getBaseHeader().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, baseHeader_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getClientNameBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, baseHeader_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getClientNameBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto) obj;
boolean result = true;
result = result && (hasBaseHeader() == other.hasBaseHeader());
if (hasBaseHeader()) {
result = result && getBaseHeader()
.equals(other.getBaseHeader());
}
result = result && (hasClientName() == other.hasClientName());
if (hasClientName()) {
result = result && getClientName()
.equals(other.getClientName());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasBaseHeader()) {
hash = (37 * hash) + BASEHEADER_FIELD_NUMBER;
hash = (53 * hash) + getBaseHeader().hashCode();
}
if (hasClientName()) {
hash = (37 * hash) + CLIENTNAME_FIELD_NUMBER;
hash = (53 * hash) + getClientName().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ClientOperationHeaderProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientOperationHeaderProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientOperationHeaderProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getBaseHeaderFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (baseHeaderBuilder_ == null) {
baseHeader_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
} else {
baseHeaderBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
clientName_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientOperationHeaderProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (baseHeaderBuilder_ == null) {
result.baseHeader_ = baseHeader_;
} else {
result.baseHeader_ = baseHeaderBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.clientName_ = clientName_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance()) return this;
if (other.hasBaseHeader()) {
mergeBaseHeader(other.getBaseHeader());
}
if (other.hasClientName()) {
bitField0_ |= 0x00000002;
clientName_ = other.clientName_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasBaseHeader()) {
return false;
}
if (!hasClientName()) {
return false;
}
if (!getBaseHeader().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto baseHeader_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> baseHeaderBuilder_;
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
public boolean hasBaseHeader() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getBaseHeader() {
if (baseHeaderBuilder_ == null) {
return baseHeader_;
} else {
return baseHeaderBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
public Builder setBaseHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
if (baseHeaderBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
baseHeader_ = value;
onChanged();
} else {
baseHeaderBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
public Builder setBaseHeader(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder builderForValue) {
if (baseHeaderBuilder_ == null) {
baseHeader_ = builderForValue.build();
onChanged();
} else {
baseHeaderBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
public Builder mergeBaseHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
if (baseHeaderBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
baseHeader_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) {
baseHeader_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder(baseHeader_).mergeFrom(value).buildPartial();
} else {
baseHeader_ = value;
}
onChanged();
} else {
baseHeaderBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
public Builder clearBaseHeader() {
if (baseHeaderBuilder_ == null) {
baseHeader_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
onChanged();
} else {
baseHeaderBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder getBaseHeaderBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getBaseHeaderFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getBaseHeaderOrBuilder() {
if (baseHeaderBuilder_ != null) {
return baseHeaderBuilder_.getMessageOrBuilder();
} else {
return baseHeader_;
}
}
/**
* required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>
getBaseHeaderFieldBuilder() {
if (baseHeaderBuilder_ == null) {
baseHeaderBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>(
baseHeader_,
getParentForChildren(),
isClean());
baseHeader_ = null;
}
return baseHeaderBuilder_;
}
// required string clientName = 2;
private java.lang.Object clientName_ = "";
/**
* required string clientName = 2;
*/
public boolean hasClientName() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string clientName = 2;
*/
public java.lang.String getClientName() {
java.lang.Object ref = clientName_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
clientName_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string clientName = 2;
*/
public com.google.protobuf.ByteString
getClientNameBytes() {
java.lang.Object ref = clientName_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
clientName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string clientName = 2;
*/
public Builder setClientName(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
clientName_ = value;
onChanged();
return this;
}
/**
* required string clientName = 2;
*/
public Builder clearClientName() {
bitField0_ = (bitField0_ & ~0x00000002);
clientName_ = getDefaultInstance().getClientName();
onChanged();
return this;
}
/**
* required string clientName = 2;
*/
public Builder setClientNameBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
clientName_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ClientOperationHeaderProto)
}
static {
defaultInstance = new ClientOperationHeaderProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ClientOperationHeaderProto)
}
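// Illustrative sketch (editor-added, not generator output): populating the two
// required fields of the ClientOperationHeaderProto defined above. The
// baseHeader (block and access token) is assumed to be built elsewhere; the
// client name string is a hypothetical example.
private static ClientOperationHeaderProto exampleClientHeader(
BaseHeaderProto baseHeader) {
return ClientOperationHeaderProto.newBuilder()
.setBaseHeader(baseHeader) // required field 1
.setClientName("DFSClient_example") // required field 2
.build();
}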
public interface CachingStrategyProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// optional bool dropBehind = 1;
/**
* optional bool dropBehind = 1;
*/
boolean hasDropBehind();
/**
* optional bool dropBehind = 1;
*/
boolean getDropBehind();
// optional int64 readahead = 2;
/**
* optional int64 readahead = 2;
*/
boolean hasReadahead();
/**
* optional int64 readahead = 2;
*/
long getReadahead();
}
/**
* Protobuf type {@code hadoop.hdfs.CachingStrategyProto}
*/
public static final class CachingStrategyProto extends
com.google.protobuf.GeneratedMessage
implements CachingStrategyProtoOrBuilder {
// Use CachingStrategyProto.newBuilder() to construct.
private CachingStrategyProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private CachingStrategyProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final CachingStrategyProto defaultInstance;
public static CachingStrategyProto getDefaultInstance() {
return defaultInstance;
}
public CachingStrategyProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private CachingStrategyProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
dropBehind_ = input.readBool();
break;
}
case 16: {
bitField0_ |= 0x00000002;
readahead_ = input.readInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_CachingStrategyProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_CachingStrategyProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder.class);
}
public static com.google.protobuf.Parser<CachingStrategyProto> PARSER =
new com.google.protobuf.AbstractParser<CachingStrategyProto>() {
public CachingStrategyProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new CachingStrategyProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<CachingStrategyProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// optional bool dropBehind = 1;
public static final int DROPBEHIND_FIELD_NUMBER = 1;
private boolean dropBehind_;
/**
* optional bool dropBehind = 1;
*/
public boolean hasDropBehind() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* optional bool dropBehind = 1;
*/
public boolean getDropBehind() {
return dropBehind_;
}
// optional int64 readahead = 2;
public static final int READAHEAD_FIELD_NUMBER = 2;
private long readahead_;
/**
* optional int64 readahead = 2;
*/
public boolean hasReadahead() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional int64 readahead = 2;
*/
public long getReadahead() {
return readahead_;
}
private void initFields() {
dropBehind_ = false;
readahead_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBool(1, dropBehind_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeInt64(2, readahead_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(1, dropBehind_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(2, readahead_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto) obj;
boolean result = true;
result = result && (hasDropBehind() == other.hasDropBehind());
if (hasDropBehind()) {
result = result && (getDropBehind()
== other.getDropBehind());
}
result = result && (hasReadahead() == other.hasReadahead());
if (hasReadahead()) {
result = result && (getReadahead()
== other.getReadahead());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasDropBehind()) {
hash = (37 * hash) + DROPBEHIND_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getDropBehind());
}
if (hasReadahead()) {
hash = (37 * hash) + READAHEAD_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getReadahead());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.CachingStrategyProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_CachingStrategyProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_CachingStrategyProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
dropBehind_ = false;
bitField0_ = (bitField0_ & ~0x00000001);
readahead_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_CachingStrategyProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.dropBehind_ = dropBehind_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.readahead_ = readahead_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance()) return this;
if (other.hasDropBehind()) {
setDropBehind(other.getDropBehind());
}
if (other.hasReadahead()) {
setReadahead(other.getReadahead());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// optional bool dropBehind = 1;
private boolean dropBehind_ ;
/**
* optional bool dropBehind = 1;
*/
public boolean hasDropBehind() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* optional bool dropBehind = 1;
*/
public boolean getDropBehind() {
return dropBehind_;
}
/**
* optional bool dropBehind = 1;
*/
public Builder setDropBehind(boolean value) {
bitField0_ |= 0x00000001;
dropBehind_ = value;
onChanged();
return this;
}
/**
* optional bool dropBehind = 1;
*/
public Builder clearDropBehind() {
bitField0_ = (bitField0_ & ~0x00000001);
dropBehind_ = false;
onChanged();
return this;
}
// optional int64 readahead = 2;
private long readahead_ ;
/**
* optional int64 readahead = 2;
*/
public boolean hasReadahead() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional int64 readahead = 2;
*/
public long getReadahead() {
return readahead_;
}
/**
* optional int64 readahead = 2;
*/
public Builder setReadahead(long value) {
bitField0_ |= 0x00000002;
readahead_ = value;
onChanged();
return this;
}
/**
* optional int64 readahead = 2;
*/
public Builder clearReadahead() {
bitField0_ = (bitField0_ & ~0x00000002);
readahead_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.CachingStrategyProto)
}
static {
defaultInstance = new CachingStrategyProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.CachingStrategyProto)
}
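// Illustrative sketch (editor-added, not generator output): a caller builds a
// CachingStrategyProto through the Builder shown above. Both fields are
// optional; the 4 MiB readahead here is a hypothetical value.
private static CachingStrategyProto exampleCachingStrategy() {
return CachingStrategyProto.newBuilder()
.setDropBehind(true) // hint: drop OS cache pages behind the read position
.setReadahead(4L * 1024 * 1024) // hint: request 4 MiB of readahead
.build();
}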
public interface OpReadBlockProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
boolean hasHeader();
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader();
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder();
// required uint64 offset = 2;
/**
* required uint64 offset = 2;
*/
boolean hasOffset();
/**
* required uint64 offset = 2;
*/
long getOffset();
// required uint64 len = 3;
/**
* required uint64 len = 3;
*/
boolean hasLen();
/**
* required uint64 len = 3;
*/
long getLen();
// optional bool sendChecksums = 4 [default = true];
/**
* optional bool sendChecksums = 4 [default = true];
*/
boolean hasSendChecksums();
/**
* optional bool sendChecksums = 4 [default = true];
*/
boolean getSendChecksums();
// optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
boolean hasCachingStrategy();
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto getCachingStrategy();
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder getCachingStrategyOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.OpReadBlockProto}
*/
public static final class OpReadBlockProto extends
com.google.protobuf.GeneratedMessage
implements OpReadBlockProtoOrBuilder {
// Use OpReadBlockProto.newBuilder() to construct.
private OpReadBlockProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private OpReadBlockProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final OpReadBlockProto defaultInstance;
public static OpReadBlockProto getDefaultInstance() {
return defaultInstance;
}
public OpReadBlockProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private OpReadBlockProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = header_.toBuilder();
}
header_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(header_);
header_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 16: {
bitField0_ |= 0x00000002;
offset_ = input.readUInt64();
break;
}
case 24: {
bitField0_ |= 0x00000004;
len_ = input.readUInt64();
break;
}
case 32: {
bitField0_ |= 0x00000008;
sendChecksums_ = input.readBool();
break;
}
case 42: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000010) == 0x00000010)) {
subBuilder = cachingStrategy_.toBuilder();
}
cachingStrategy_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(cachingStrategy_);
cachingStrategy_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000010;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReadBlockProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReadBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.Builder.class);
}
public static com.google.protobuf.Parser<OpReadBlockProto> PARSER =
new com.google.protobuf.AbstractParser<OpReadBlockProto>() {
public OpReadBlockProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new OpReadBlockProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<OpReadBlockProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
public static final int HEADER_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_;
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
return header_;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
return header_;
}
// required uint64 offset = 2;
public static final int OFFSET_FIELD_NUMBER = 2;
private long offset_;
/**
* required uint64 offset = 2;
*/
public boolean hasOffset() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 offset = 2;
*/
public long getOffset() {
return offset_;
}
// required uint64 len = 3;
public static final int LEN_FIELD_NUMBER = 3;
private long len_;
/**
* required uint64 len = 3;
*/
public boolean hasLen() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 len = 3;
*/
public long getLen() {
return len_;
}
// optional bool sendChecksums = 4 [default = true];
public static final int SENDCHECKSUMS_FIELD_NUMBER = 4;
private boolean sendChecksums_;
/**
* optional bool sendChecksums = 4 [default = true];
*/
public boolean hasSendChecksums() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional bool sendChecksums = 4 [default = true];
*/
public boolean getSendChecksums() {
return sendChecksums_;
}
// optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
public static final int CACHINGSTRATEGY_FIELD_NUMBER = 5;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto cachingStrategy_;
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
public boolean hasCachingStrategy() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto getCachingStrategy() {
return cachingStrategy_;
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder getCachingStrategyOrBuilder() {
return cachingStrategy_;
}
private void initFields() {
header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
offset_ = 0L;
len_ = 0L;
sendChecksums_ = true;
cachingStrategy_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasHeader()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasOffset()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasLen()) {
memoizedIsInitialized = 0;
return false;
}
if (!getHeader().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, header_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, offset_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt64(3, len_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeBool(4, sendChecksums_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeMessage(5, cachingStrategy_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, header_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, offset_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, len_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(4, sendChecksums_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(5, cachingStrategy_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto) obj;
boolean result = true;
result = result && (hasHeader() == other.hasHeader());
if (hasHeader()) {
result = result && getHeader()
.equals(other.getHeader());
}
result = result && (hasOffset() == other.hasOffset());
if (hasOffset()) {
result = result && (getOffset()
== other.getOffset());
}
result = result && (hasLen() == other.hasLen());
if (hasLen()) {
result = result && (getLen()
== other.getLen());
}
result = result && (hasSendChecksums() == other.hasSendChecksums());
if (hasSendChecksums()) {
result = result && (getSendChecksums()
== other.getSendChecksums());
}
result = result && (hasCachingStrategy() == other.hasCachingStrategy());
if (hasCachingStrategy()) {
result = result && getCachingStrategy()
.equals(other.getCachingStrategy());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasHeader()) {
hash = (37 * hash) + HEADER_FIELD_NUMBER;
hash = (53 * hash) + getHeader().hashCode();
}
if (hasOffset()) {
hash = (37 * hash) + OFFSET_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getOffset());
}
if (hasLen()) {
hash = (37 * hash) + LEN_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getLen());
}
if (hasSendChecksums()) {
hash = (37 * hash) + SENDCHECKSUMS_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getSendChecksums());
}
if (hasCachingStrategy()) {
hash = (37 * hash) + CACHINGSTRATEGY_FIELD_NUMBER;
hash = (53 * hash) + getCachingStrategy().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.OpReadBlockProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReadBlockProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReadBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getHeaderFieldBuilder();
getCachingStrategyFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (headerBuilder_ == null) {
header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
offset_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
len_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
sendChecksums_ = true;
bitField0_ = (bitField0_ & ~0x00000008);
if (cachingStrategyBuilder_ == null) {
cachingStrategy_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance();
} else {
cachingStrategyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReadBlockProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (headerBuilder_ == null) {
result.header_ = header_;
} else {
result.header_ = headerBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.offset_ = offset_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.len_ = len_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.sendChecksums_ = sendChecksums_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
if (cachingStrategyBuilder_ == null) {
result.cachingStrategy_ = cachingStrategy_;
} else {
result.cachingStrategy_ = cachingStrategyBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.getDefaultInstance()) return this;
if (other.hasHeader()) {
mergeHeader(other.getHeader());
}
if (other.hasOffset()) {
setOffset(other.getOffset());
}
if (other.hasLen()) {
setLen(other.getLen());
}
if (other.hasSendChecksums()) {
setSendChecksums(other.getSendChecksums());
}
if (other.hasCachingStrategy()) {
mergeCachingStrategy(other.getCachingStrategy());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasHeader()) {
return false;
}
if (!hasOffset()) {
return false;
}
if (!hasLen()) {
return false;
}
if (!getHeader().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder> headerBuilder_;
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
if (headerBuilder_ == null) {
return header_;
} else {
return headerBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
if (headerBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
header_ = value;
onChanged();
} else {
headerBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public Builder setHeader(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder builderForValue) {
if (headerBuilder_ == null) {
header_ = builderForValue.build();
onChanged();
} else {
headerBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
if (headerBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance()) {
header_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial();
} else {
header_ = value;
}
onChanged();
} else {
headerBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public Builder clearHeader() {
if (headerBuilder_ == null) {
header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
onChanged();
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder getHeaderBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getHeaderFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
if (headerBuilder_ != null) {
return headerBuilder_.getMessageOrBuilder();
} else {
return header_;
}
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>
getHeaderFieldBuilder() {
if (headerBuilder_ == null) {
headerBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>(
header_,
getParentForChildren(),
isClean());
header_ = null;
}
return headerBuilder_;
}
// required uint64 offset = 2;
private long offset_ ;
/**
* required uint64 offset = 2;
*/
public boolean hasOffset() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 offset = 2;
*/
public long getOffset() {
return offset_;
}
/**
* required uint64 offset = 2;
*/
public Builder setOffset(long value) {
bitField0_ |= 0x00000002;
offset_ = value;
onChanged();
return this;
}
/**
* required uint64 offset = 2;
*/
public Builder clearOffset() {
bitField0_ = (bitField0_ & ~0x00000002);
offset_ = 0L;
onChanged();
return this;
}
// required uint64 len = 3;
private long len_ ;
/**
* required uint64 len = 3;
*/
public boolean hasLen() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required uint64 len = 3;
*/
public long getLen() {
return len_;
}
/**
* required uint64 len = 3;
*/
public Builder setLen(long value) {
bitField0_ |= 0x00000004;
len_ = value;
onChanged();
return this;
}
/**
* required uint64 len = 3;
*/
public Builder clearLen() {
bitField0_ = (bitField0_ & ~0x00000004);
len_ = 0L;
onChanged();
return this;
}
// optional bool sendChecksums = 4 [default = true];
private boolean sendChecksums_ = true;
/**
* optional bool sendChecksums = 4 [default = true];
*/
public boolean hasSendChecksums() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional bool sendChecksums = 4 [default = true];
*/
public boolean getSendChecksums() {
return sendChecksums_;
}
/**
* optional bool sendChecksums = 4 [default = true];
*/
public Builder setSendChecksums(boolean value) {
bitField0_ |= 0x00000008;
sendChecksums_ = value;
onChanged();
return this;
}
/**
* optional bool sendChecksums = 4 [default = true];
*/
public Builder clearSendChecksums() {
bitField0_ = (bitField0_ & ~0x00000008);
sendChecksums_ = true;
onChanged();
return this;
}
// optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto cachingStrategy_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder> cachingStrategyBuilder_;
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
public boolean hasCachingStrategy() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto getCachingStrategy() {
if (cachingStrategyBuilder_ == null) {
return cachingStrategy_;
} else {
return cachingStrategyBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
public Builder setCachingStrategy(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto value) {
if (cachingStrategyBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
cachingStrategy_ = value;
onChanged();
} else {
cachingStrategyBuilder_.setMessage(value);
}
bitField0_ |= 0x00000010;
return this;
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
public Builder setCachingStrategy(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder builderForValue) {
if (cachingStrategyBuilder_ == null) {
cachingStrategy_ = builderForValue.build();
onChanged();
} else {
cachingStrategyBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000010;
return this;
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
public Builder mergeCachingStrategy(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto value) {
if (cachingStrategyBuilder_ == null) {
if (((bitField0_ & 0x00000010) == 0x00000010) &&
cachingStrategy_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance()) {
cachingStrategy_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.newBuilder(cachingStrategy_).mergeFrom(value).buildPartial();
} else {
cachingStrategy_ = value;
}
onChanged();
} else {
cachingStrategyBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000010;
return this;
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
public Builder clearCachingStrategy() {
if (cachingStrategyBuilder_ == null) {
cachingStrategy_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance();
onChanged();
} else {
cachingStrategyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder getCachingStrategyBuilder() {
bitField0_ |= 0x00000010;
onChanged();
return getCachingStrategyFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder getCachingStrategyOrBuilder() {
if (cachingStrategyBuilder_ != null) {
return cachingStrategyBuilder_.getMessageOrBuilder();
} else {
return cachingStrategy_;
}
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder>
getCachingStrategyFieldBuilder() {
if (cachingStrategyBuilder_ == null) {
cachingStrategyBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder>(
cachingStrategy_,
getParentForChildren(),
isClean());
cachingStrategy_ = null;
}
return cachingStrategyBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpReadBlockProto)
}
static {
defaultInstance = new OpReadBlockProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.OpReadBlockProto)
}
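// Illustrative sketch (editor-added, not generator output): assembling the
// OP_READ_BLOCK request defined above. header, offset and len are required
// fields, so build() throws UninitializedMessageException if any is unset;
// sendChecksums is optional and defaults to true when left out.
private static OpReadBlockProto exampleReadRequest(
ClientOperationHeaderProto header, long offset, long len) {
return OpReadBlockProto.newBuilder()
.setHeader(header) // required: block, access token and client name
.setOffset(offset) // required: starting byte offset within the block
.setLen(len) // required: number of bytes to read
.setSendChecksums(false) // optional; omit to keep the default of true
.build();
}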
public interface ChecksumProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.ChecksumTypeProto type = 1;
/**
* required .hadoop.hdfs.ChecksumTypeProto type = 1;
*/
boolean hasType();
/**
* required .hadoop.hdfs.ChecksumTypeProto type = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getType();
// required uint32 bytesPerChecksum = 2;
/**
* required uint32 bytesPerChecksum = 2;
*/
boolean hasBytesPerChecksum();
/**
* required uint32 bytesPerChecksum = 2;
*/
int getBytesPerChecksum();
}
/**
* Protobuf type {@code hadoop.hdfs.ChecksumProto}
*/
public static final class ChecksumProto extends
com.google.protobuf.GeneratedMessage
implements ChecksumProtoOrBuilder {
// Use ChecksumProto.newBuilder() to construct.
private ChecksumProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ChecksumProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ChecksumProto defaultInstance;
public static ChecksumProto getDefaultInstance() {
return defaultInstance;
}
public ChecksumProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ChecksumProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
type_ = value;
}
break;
}
case 16: {
bitField0_ |= 0x00000002;
bytesPerChecksum_ = input.readUInt32();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ChecksumProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ChecksumProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder.class);
}
public static com.google.protobuf.Parser<ChecksumProto> PARSER =
new com.google.protobuf.AbstractParser<ChecksumProto>() {
public ChecksumProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ChecksumProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<ChecksumProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.ChecksumTypeProto type = 1;
public static final int TYPE_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto type_;
/**
* required .hadoop.hdfs.ChecksumTypeProto type = 1;
*/
public boolean hasType() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ChecksumTypeProto type = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getType() {
return type_;
}
// required uint32 bytesPerChecksum = 2;
public static final int BYTESPERCHECKSUM_FIELD_NUMBER = 2;
private int bytesPerChecksum_;
/**
* required uint32 bytesPerChecksum = 2;
*/
public boolean hasBytesPerChecksum() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint32 bytesPerChecksum = 2;
*/
public int getBytesPerChecksum() {
return bytesPerChecksum_;
}
private void initFields() {
type_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL;
bytesPerChecksum_ = 0;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasType()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasBytesPerChecksum()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, type_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt32(2, bytesPerChecksum_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, type_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(2, bytesPerChecksum_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto) obj;
boolean result = true;
result = result && (hasType() == other.hasType());
if (hasType()) {
result = result &&
(getType() == other.getType());
}
result = result && (hasBytesPerChecksum() == other.hasBytesPerChecksum());
if (hasBytesPerChecksum()) {
result = result && (getBytesPerChecksum()
== other.getBytesPerChecksum());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasType()) {
hash = (37 * hash) + TYPE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getType());
}
if (hasBytesPerChecksum()) {
hash = (37 * hash) + BYTESPERCHECKSUM_FIELD_NUMBER;
hash = (53 * hash) + getBytesPerChecksum();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ChecksumProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ChecksumProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ChecksumProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
type_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL;
bitField0_ = (bitField0_ & ~0x00000001);
bytesPerChecksum_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ChecksumProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.type_ = type_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.bytesPerChecksum_ = bytesPerChecksum_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance()) return this;
if (other.hasType()) {
setType(other.getType());
}
if (other.hasBytesPerChecksum()) {
setBytesPerChecksum(other.getBytesPerChecksum());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasType()) {
return false;
}
if (!hasBytesPerChecksum()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.ChecksumTypeProto type = 1;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto type_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL;
/**
* required .hadoop.hdfs.ChecksumTypeProto type = 1;
*/
public boolean hasType() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ChecksumTypeProto type = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getType() {
return type_;
}
/**
* required .hadoop.hdfs.ChecksumTypeProto type = 1;
*/
public Builder setType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
type_ = value;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.ChecksumTypeProto type = 1;
*/
public Builder clearType() {
bitField0_ = (bitField0_ & ~0x00000001);
type_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL;
onChanged();
return this;
}
// required uint32 bytesPerChecksum = 2;
private int bytesPerChecksum_ ;
/**
* required uint32 bytesPerChecksum = 2;
*/
public boolean hasBytesPerChecksum() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint32 bytesPerChecksum = 2;
*/
public int getBytesPerChecksum() {
return bytesPerChecksum_;
}
/**
* required uint32 bytesPerChecksum = 2;
*/
public Builder setBytesPerChecksum(int value) {
bitField0_ |= 0x00000002;
bytesPerChecksum_ = value;
onChanged();
return this;
}
/**
* required uint32 bytesPerChecksum = 2;
*/
public Builder clearBytesPerChecksum() {
bitField0_ = (bitField0_ & ~0x00000002);
bytesPerChecksum_ = 0;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ChecksumProto)
}
static {
defaultInstance = new ChecksumProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ChecksumProto)
}
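// --- Usage sketch (editorial addition, not emitted by protoc) ---
// A minimal round-trip through the generated ChecksumProto API above. The
// checksum type and chunk size are example values only:
//
//   ChecksumProto ck = ChecksumProto.newBuilder()
//       .setType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C)
//       .setBytesPerChecksum(512)   // both fields are required; build() throws if either is unset
//       .build();
//   byte[] wire = ck.toByteArray();
//   ChecksumProto parsed = ChecksumProto.parseFrom(wire);
//   assert parsed.equals(ck);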
public interface OpWriteBlockProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
boolean hasHeader();
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader();
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder();
// repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>
getTargetsList();
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index);
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
int getTargetsCount();
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getTargetsOrBuilderList();
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
int index);
// optional .hadoop.hdfs.DatanodeInfoProto source = 3;
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
boolean hasSource();
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource();
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder();
// required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;
/**
* required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;
*/
boolean hasStage();
/**
* required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage getStage();
// required uint32 pipelineSize = 5;
/**
* required uint32 pipelineSize = 5;
*/
boolean hasPipelineSize();
/**
* required uint32 pipelineSize = 5;
*/
int getPipelineSize();
// required uint64 minBytesRcvd = 6;
/**
* required uint64 minBytesRcvd = 6;
*/
boolean hasMinBytesRcvd();
/**
* required uint64 minBytesRcvd = 6;
*/
long getMinBytesRcvd();
// required uint64 maxBytesRcvd = 7;
/**
* required uint64 maxBytesRcvd = 7;
*/
boolean hasMaxBytesRcvd();
/**
* required uint64 maxBytesRcvd = 7;
*/
long getMaxBytesRcvd();
// required uint64 latestGenerationStamp = 8;
/**
* required uint64 latestGenerationStamp = 8;
*/
boolean hasLatestGenerationStamp();
/**
* required uint64 latestGenerationStamp = 8;
*/
long getLatestGenerationStamp();
// required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
/**
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*
*
**
* The requested checksum mechanism for this block write.
*
*/
boolean hasRequestedChecksum();
/**
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*
*
**
* The requested checksum mechanism for this block write.
*
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getRequestedChecksum();
/**
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*
*
**
* The requested checksum mechanism for this block write.
*
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getRequestedChecksumOrBuilder();
// optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
boolean hasCachingStrategy();
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto getCachingStrategy();
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder getCachingStrategyOrBuilder();
// optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];
*/
boolean hasStorageType();
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType();
// repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getTargetStorageTypesList();
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
*/
int getTargetStorageTypesCount();
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getTargetStorageTypes(int index);
// optional bool allowLazyPersist = 13 [default = false];
/**
* optional bool allowLazyPersist = 13 [default = false];
*
*
**
* Hint to the DataNode that the block can be allocated on transient
* storage i.e. memory and written to disk lazily. The DataNode is free
* to ignore this hint.
*
*/
boolean hasAllowLazyPersist();
/**
* optional bool allowLazyPersist = 13 [default = false];
*
*
**
* Hint to the DataNode that the block can be allocated on transient
* storage i.e. memory and written to disk lazily. The DataNode is free
* to ignore this hint.
*
*/
boolean getAllowLazyPersist();
// optional bool pinning = 14 [default = false];
/**
* optional bool pinning = 14 [default = false];
*
*
* whether to pin the block, so Balancer won't move it.
*
*/
boolean hasPinning();
/**
* optional bool pinning = 14 [default = false];
*
*
* whether to pin the block, so Balancer won't move it.
*
*/
boolean getPinning();
// repeated bool targetPinnings = 15;
/**
* repeated bool targetPinnings = 15;
*/
java.util.List<java.lang.Boolean> getTargetPinningsList();
/**
* repeated bool targetPinnings = 15;
*/
int getTargetPinningsCount();
/**
* repeated bool targetPinnings = 15;
*/
boolean getTargetPinnings(int index);
// optional string storageId = 16;
/**
* optional string storageId = 16;
*/
boolean hasStorageId();
/**
* optional string storageId = 16;
*/
java.lang.String getStorageId();
/**
* optional string storageId = 16;
*/
com.google.protobuf.ByteString
getStorageIdBytes();
// repeated string targetStorageIds = 17;
/**
* repeated string targetStorageIds = 17;
*/
java.util.List<java.lang.String>
getTargetStorageIdsList();
/**
* repeated string targetStorageIds = 17;
*/
int getTargetStorageIdsCount();
/**
* repeated string targetStorageIds = 17;
*/
java.lang.String getTargetStorageIds(int index);
/**
* repeated string targetStorageIds = 17;
*/
com.google.protobuf.ByteString
getTargetStorageIdsBytes(int index);
}
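// --- Usage sketch (editorial addition, not emitted by protoc) ---
// Constructing an OpWriteBlockProto through the builder defined below. Every
// field the interface above marks "required" must be set, or build() throws
// an UninitializedMessageException; `header` and `ck` are assumed to have
// been built elsewhere:
//
//   OpWriteBlockProto op = OpWriteBlockProto.newBuilder()
//       .setHeader(header)                 // required ClientOperationHeaderProto
//       .setStage(OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_CREATE)
//       .setPipelineSize(3)
//       .setMinBytesRcvd(0L)
//       .setMaxBytesRcvd(0L)
//       .setLatestGenerationStamp(1001L)
//       .setRequestedChecksum(ck)          // required ChecksumProto
//       .build();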
/**
* Protobuf type {@code hadoop.hdfs.OpWriteBlockProto}
*/
public static final class OpWriteBlockProto extends
com.google.protobuf.GeneratedMessage
implements OpWriteBlockProtoOrBuilder {
// Use OpWriteBlockProto.newBuilder() to construct.
private OpWriteBlockProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private OpWriteBlockProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final OpWriteBlockProto defaultInstance;
public static OpWriteBlockProto getDefaultInstance() {
return defaultInstance;
}
public OpWriteBlockProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private OpWriteBlockProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = header_.toBuilder();
}
header_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(header_);
header_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
targets_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>();
mutable_bitField0_ |= 0x00000002;
}
targets_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.PARSER, extensionRegistry));
break;
}
case 26: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
subBuilder = source_.toBuilder();
}
source_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(source_);
source_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
case 32: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(4, rawValue);
} else {
bitField0_ |= 0x00000004;
stage_ = value;
}
break;
}
case 40: {
bitField0_ |= 0x00000008;
pipelineSize_ = input.readUInt32();
break;
}
case 48: {
bitField0_ |= 0x00000010;
minBytesRcvd_ = input.readUInt64();
break;
}
case 56: {
bitField0_ |= 0x00000020;
maxBytesRcvd_ = input.readUInt64();
break;
}
case 64: {
bitField0_ |= 0x00000040;
latestGenerationStamp_ = input.readUInt64();
break;
}
case 74: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000080) == 0x00000080)) {
subBuilder = requestedChecksum_.toBuilder();
}
requestedChecksum_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(requestedChecksum_);
requestedChecksum_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000080;
break;
}
case 82: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000100) == 0x00000100)) {
subBuilder = cachingStrategy_.toBuilder();
}
cachingStrategy_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(cachingStrategy_);
cachingStrategy_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000100;
break;
}
case 88: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(11, rawValue);
} else {
bitField0_ |= 0x00000200;
storageType_ = value;
}
break;
}
case 96: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(12, rawValue);
} else {
if (!((mutable_bitField0_ & 0x00000800) == 0x00000800)) {
targetStorageTypes_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>();
mutable_bitField0_ |= 0x00000800;
}
targetStorageTypes_.add(value);
}
break;
}
case 98: {
int length = input.readRawVarint32();
int oldLimit = input.pushLimit(length);
while(input.getBytesUntilLimit() > 0) {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(12, rawValue);
} else {
if (!((mutable_bitField0_ & 0x00000800) == 0x00000800)) {
targetStorageTypes_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>();
mutable_bitField0_ |= 0x00000800;
}
targetStorageTypes_.add(value);
}
}
input.popLimit(oldLimit);
break;
}
case 104: {
bitField0_ |= 0x00000400;
allowLazyPersist_ = input.readBool();
break;
}
case 112: {
bitField0_ |= 0x00000800;
pinning_ = input.readBool();
break;
}
case 120: {
if (!((mutable_bitField0_ & 0x00004000) == 0x00004000)) {
targetPinnings_ = new java.util.ArrayList<java.lang.Boolean>();
mutable_bitField0_ |= 0x00004000;
}
targetPinnings_.add(input.readBool());
break;
}
case 122: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
if (!((mutable_bitField0_ & 0x00004000) == 0x00004000) && input.getBytesUntilLimit() > 0) {
targetPinnings_ = new java.util.ArrayList<java.lang.Boolean>();
mutable_bitField0_ |= 0x00004000;
}
while (input.getBytesUntilLimit() > 0) {
targetPinnings_.add(input.readBool());
}
input.popLimit(limit);
break;
}
case 130: {
bitField0_ |= 0x00001000;
storageId_ = input.readBytes();
break;
}
case 138: {
if (!((mutable_bitField0_ & 0x00010000) == 0x00010000)) {
targetStorageIds_ = new com.google.protobuf.LazyStringArrayList();
mutable_bitField0_ |= 0x00010000;
}
targetStorageIds_.add(input.readBytes());
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
targets_ = java.util.Collections.unmodifiableList(targets_);
}
if (((mutable_bitField0_ & 0x00000800) == 0x00000800)) {
targetStorageTypes_ = java.util.Collections.unmodifiableList(targetStorageTypes_);
}
if (((mutable_bitField0_ & 0x00004000) == 0x00004000)) {
targetPinnings_ = java.util.Collections.unmodifiableList(targetPinnings_);
}
if (((mutable_bitField0_ & 0x00010000) == 0x00010000)) {
targetStorageIds_ = new com.google.protobuf.UnmodifiableLazyStringList(targetStorageIds_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
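// Wire-format note (editorial addition): each case label in the parsing
// switch above is a precomputed protobuf tag, tag = (fieldNumber << 3) | wireType.
// For example, header is field 1 with length-delimited wire type 2, so its
// tag is (1 << 3) | 2 = 10; stage is field 4 as a varint, (4 << 3) | 0 = 32.
// targetStorageTypes (field 12) appears under two labels because repeated
// enums may arrive unpacked as individual varints ((12 << 3) | 0 = 96) or
// packed into one length-delimited run ((12 << 3) | 2 = 98); targetPinnings
// (field 15) is handled the same way at cases 120 and 122.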
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpWriteBlockProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpWriteBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.Builder.class);
}
public static com.google.protobuf.Parser<OpWriteBlockProto> PARSER =
new com.google.protobuf.AbstractParser<OpWriteBlockProto>() {
public OpWriteBlockProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new OpWriteBlockProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<OpWriteBlockProto> getParserForType() {
return PARSER;
}
/**
* Protobuf enum {@code hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage}
*/
public enum BlockConstructionStage
implements com.google.protobuf.ProtocolMessageEnum {
/**
* PIPELINE_SETUP_APPEND = 0;
*/
PIPELINE_SETUP_APPEND(0, 0),
/**
* PIPELINE_SETUP_APPEND_RECOVERY = 1;
*
*
* pipeline set up for failed PIPELINE_SETUP_APPEND recovery
*
*/
PIPELINE_SETUP_APPEND_RECOVERY(1, 1),
/**
* DATA_STREAMING = 2;
*
*
* data streaming
*
*/
DATA_STREAMING(2, 2),
/**
* PIPELINE_SETUP_STREAMING_RECOVERY = 3;
*
*
* pipeline setup for failed data streaming recovery
*
*/
PIPELINE_SETUP_STREAMING_RECOVERY(3, 3),
/**
* PIPELINE_CLOSE = 4;
*
*
* close the block and pipeline
*
*/
PIPELINE_CLOSE(4, 4),
/**
* PIPELINE_CLOSE_RECOVERY = 5;
*
*
* Recover a failed PIPELINE_CLOSE
*
*/
PIPELINE_CLOSE_RECOVERY(5, 5),
/**
* PIPELINE_SETUP_CREATE = 6;
*
*
* pipeline set up for block creation
*
*/
PIPELINE_SETUP_CREATE(6, 6),
/**
* TRANSFER_RBW = 7;
*
*
* transfer RBW for adding datanodes
*
*/
TRANSFER_RBW(7, 7),
/**
* TRANSFER_FINALIZED = 8;
*
*
* transfer Finalized for adding datanodes
*
*/
TRANSFER_FINALIZED(8, 8),
;
/**
* PIPELINE_SETUP_APPEND = 0;
*/
public static final int PIPELINE_SETUP_APPEND_VALUE = 0;
/**
* PIPELINE_SETUP_APPEND_RECOVERY = 1;
*
*
* pipeline set up for failed PIPELINE_SETUP_APPEND recovery
*
*/
public static final int PIPELINE_SETUP_APPEND_RECOVERY_VALUE = 1;
/**
* DATA_STREAMING = 2;
*
*
* data streaming
*
*/
public static final int DATA_STREAMING_VALUE = 2;
/**
* PIPELINE_SETUP_STREAMING_RECOVERY = 3;
*
*
* pipeline setup for failed data streaming recovery
*
*/
public static final int PIPELINE_SETUP_STREAMING_RECOVERY_VALUE = 3;
/**
* PIPELINE_CLOSE = 4;
*
*
* close the block and pipeline
*
*/
public static final int PIPELINE_CLOSE_VALUE = 4;
/**
* PIPELINE_CLOSE_RECOVERY = 5;
*
*
* Recover a failed PIPELINE_CLOSE
*
*/
public static final int PIPELINE_CLOSE_RECOVERY_VALUE = 5;
/**
* PIPELINE_SETUP_CREATE = 6;
*
*
* pipeline set up for block creation
*
*/
public static final int PIPELINE_SETUP_CREATE_VALUE = 6;
/**
* TRANSFER_RBW = 7;
*
*
* transfer RBW for adding datanodes
*
*/
public static final int TRANSFER_RBW_VALUE = 7;
/**
* TRANSFER_FINALIZED = 8;
*
*
* transfer Finalized for adding datanodes
*
*/
public static final int TRANSFER_FINALIZED_VALUE = 8;
public final int getNumber() { return value; }
public static BlockConstructionStage valueOf(int value) {
switch (value) {
case 0: return PIPELINE_SETUP_APPEND;
case 1: return PIPELINE_SETUP_APPEND_RECOVERY;
case 2: return DATA_STREAMING;
case 3: return PIPELINE_SETUP_STREAMING_RECOVERY;
case 4: return PIPELINE_CLOSE;
case 5: return PIPELINE_CLOSE_RECOVERY;
case 6: return PIPELINE_SETUP_CREATE;
case 7: return TRANSFER_RBW;
case 8: return TRANSFER_FINALIZED;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<BlockConstructionStage>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<BlockConstructionStage>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<BlockConstructionStage>() {
public BlockConstructionStage findValueByNumber(int number) {
return BlockConstructionStage.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.getDescriptor().getEnumTypes().get(0);
}
private static final BlockConstructionStage[] VALUES = values();
public static BlockConstructionStage valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private BlockConstructionStage(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage)
}
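// Usage sketch (editorial addition): round-tripping the enum through its wire
// number. valueOf(int) returns null for numbers outside 0..8, which is why the
// parsing constructor above null-checks before assigning stage_:
//
//   int raw = BlockConstructionStage.DATA_STREAMING.getNumber();         // 2
//   BlockConstructionStage st = BlockConstructionStage.valueOf(raw);     // DATA_STREAMING
//   BlockConstructionStage unknown = BlockConstructionStage.valueOf(99); // null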
private int bitField0_;
// required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
public static final int HEADER_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_;
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
return header_;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
return header_;
}
// repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
public static final int TARGETS_FIELD_NUMBER = 2;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> targets_;
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getTargetsList() {
return targets_;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getTargetsOrBuilderList() {
return targets_;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public int getTargetsCount() {
return targets_.size();
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index) {
return targets_.get(index);
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
int index) {
return targets_.get(index);
}
// optional .hadoop.hdfs.DatanodeInfoProto source = 3;
public static final int SOURCE_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto source_;
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public boolean hasSource() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource() {
return source_;
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder() {
return source_;
}
// required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;
public static final int STAGE_FIELD_NUMBER = 4;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage stage_;
/**
* required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;
*/
public boolean hasStage() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage getStage() {
return stage_;
}
// required uint32 pipelineSize = 5;
public static final int PIPELINESIZE_FIELD_NUMBER = 5;
private int pipelineSize_;
/**
* required uint32 pipelineSize = 5;
*/
public boolean hasPipelineSize() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required uint32 pipelineSize = 5;
*/
public int getPipelineSize() {
return pipelineSize_;
}
// required uint64 minBytesRcvd = 6;
public static final int MINBYTESRCVD_FIELD_NUMBER = 6;
private long minBytesRcvd_;
/**
* required uint64 minBytesRcvd = 6;
*/
public boolean hasMinBytesRcvd() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required uint64 minBytesRcvd = 6;
*/
public long getMinBytesRcvd() {
return minBytesRcvd_;
}
// required uint64 maxBytesRcvd = 7;
public static final int MAXBYTESRCVD_FIELD_NUMBER = 7;
private long maxBytesRcvd_;
/**
* required uint64 maxBytesRcvd = 7;
*/
public boolean hasMaxBytesRcvd() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* required uint64 maxBytesRcvd = 7;
*/
public long getMaxBytesRcvd() {
return maxBytesRcvd_;
}
// required uint64 latestGenerationStamp = 8;
public static final int LATESTGENERATIONSTAMP_FIELD_NUMBER = 8;
private long latestGenerationStamp_;
/**
* required uint64 latestGenerationStamp = 8;
*/
public boolean hasLatestGenerationStamp() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* required uint64 latestGenerationStamp = 8;
*/
public long getLatestGenerationStamp() {
return latestGenerationStamp_;
}
// required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
public static final int REQUESTEDCHECKSUM_FIELD_NUMBER = 9;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto requestedChecksum_;
/**
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*
*
**
* The requested checksum mechanism for this block write.
*
*/
public boolean hasRequestedChecksum() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*
*
**
* The requested checksum mechanism for this block write.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getRequestedChecksum() {
return requestedChecksum_;
}
/**
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*
*
**
* The requested checksum mechanism for this block write.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getRequestedChecksumOrBuilder() {
return requestedChecksum_;
}
// optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
public static final int CACHINGSTRATEGY_FIELD_NUMBER = 10;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto cachingStrategy_;
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
public boolean hasCachingStrategy() {
return ((bitField0_ & 0x00000100) == 0x00000100);
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto getCachingStrategy() {
return cachingStrategy_;
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder getCachingStrategyOrBuilder() {
return cachingStrategy_;
}
// optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];
public static final int STORAGETYPE_FIELD_NUMBER = 11;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto storageType_;
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];
*/
public boolean hasStorageType() {
return ((bitField0_ & 0x00000200) == 0x00000200);
}
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
return storageType_;
}
// repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
public static final int TARGETSTORAGETYPES_FIELD_NUMBER = 12;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> targetStorageTypes_;
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getTargetStorageTypesList() {
return targetStorageTypes_;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
*/
public int getTargetStorageTypesCount() {
return targetStorageTypes_.size();
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getTargetStorageTypes(int index) {
return targetStorageTypes_.get(index);
}
// optional bool allowLazyPersist = 13 [default = false];
public static final int ALLOWLAZYPERSIST_FIELD_NUMBER = 13;
private boolean allowLazyPersist_;
/**
* optional bool allowLazyPersist = 13 [default = false];
*
*
**
* Hint to the DataNode that the block can be allocated on transient
* storage i.e. memory and written to disk lazily. The DataNode is free
* to ignore this hint.
*
*/
public boolean hasAllowLazyPersist() {
return ((bitField0_ & 0x00000400) == 0x00000400);
}
/**
* optional bool allowLazyPersist = 13 [default = false];
*
*
**
* Hint to the DataNode that the block can be allocated on transient
* storage i.e. memory and written to disk lazily. The DataNode is free
* to ignore this hint.
*
*/
public boolean getAllowLazyPersist() {
return allowLazyPersist_;
}
// optional bool pinning = 14 [default = false];
public static final int PINNING_FIELD_NUMBER = 14;
private boolean pinning_;
/**
* optional bool pinning = 14 [default = false];
*
*
* whether to pin the block, so Balancer won't move it.
*
*/
public boolean hasPinning() {
return ((bitField0_ & 0x00000800) == 0x00000800);
}
/**
* optional bool pinning = 14 [default = false];
*
*
* whether to pin the block, so Balancer won't move it.
*
*/
public boolean getPinning() {
return pinning_;
}
// repeated bool targetPinnings = 15;
public static final int TARGETPINNINGS_FIELD_NUMBER = 15;
private java.util.List<java.lang.Boolean> targetPinnings_;
/**
* repeated bool targetPinnings = 15;
*/
public java.util.List<java.lang.Boolean>
getTargetPinningsList() {
return targetPinnings_;
}
/**
* repeated bool targetPinnings = 15;
*/
public int getTargetPinningsCount() {
return targetPinnings_.size();
}
/**
* repeated bool targetPinnings = 15;
*/
public boolean getTargetPinnings(int index) {
return targetPinnings_.get(index);
}
// optional string storageId = 16;
public static final int STORAGEID_FIELD_NUMBER = 16;
private java.lang.Object storageId_;
/**
* optional string storageId = 16;
*/
public boolean hasStorageId() {
return ((bitField0_ & 0x00001000) == 0x00001000);
}
/**
* optional string storageId = 16;
*/
public java.lang.String getStorageId() {
java.lang.Object ref = storageId_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
storageId_ = s;
}
return s;
}
}
/**
* optional string storageId = 16;
*/
public com.google.protobuf.ByteString
getStorageIdBytes() {
java.lang.Object ref = storageId_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
storageId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
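// Note (editorial addition): storageId_ is declared as java.lang.Object so it
// can hold either the raw ByteString read off the wire or its decoded String.
// getStorageId() decodes once and, when the bytes are valid UTF-8, caches the
// String back into the field; getStorageIdBytes() converts in the opposite
// direction. Subsequent calls on the same instance then skip the conversion.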
// repeated string targetStorageIds = 17;
public static final int TARGETSTORAGEIDS_FIELD_NUMBER = 17;
private com.google.protobuf.LazyStringList targetStorageIds_;
/**
* repeated string targetStorageIds = 17;
*/
public java.util.List<java.lang.String>
getTargetStorageIdsList() {
return targetStorageIds_;
}
/**
* repeated string targetStorageIds = 17;
*/
public int getTargetStorageIdsCount() {
return targetStorageIds_.size();
}
/**
* repeated string targetStorageIds = 17;
*/
public java.lang.String getTargetStorageIds(int index) {
return targetStorageIds_.get(index);
}
/**
* repeated string targetStorageIds = 17;
*/
public com.google.protobuf.ByteString
getTargetStorageIdsBytes(int index) {
return targetStorageIds_.getByteString(index);
}
private void initFields() {
header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
targets_ = java.util.Collections.emptyList();
source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance();
stage_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_APPEND;
pipelineSize_ = 0;
minBytesRcvd_ = 0L;
maxBytesRcvd_ = 0L;
latestGenerationStamp_ = 0L;
requestedChecksum_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance();
cachingStrategy_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance();
storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
targetStorageTypes_ = java.util.Collections.emptyList();
allowLazyPersist_ = false;
pinning_ = false;
targetPinnings_ = java.util.Collections.emptyList();
storageId_ = "";
targetStorageIds_ = com.google.protobuf.LazyStringArrayList.EMPTY;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasHeader()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasStage()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasPipelineSize()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasMinBytesRcvd()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasMaxBytesRcvd()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasLatestGenerationStamp()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasRequestedChecksum()) {
memoizedIsInitialized = 0;
return false;
}
if (!getHeader().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getTargetsCount(); i++) {
if (!getTargets(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasSource()) {
if (!getSource().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (!getRequestedChecksum().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, header_);
}
for (int i = 0; i < targets_.size(); i++) {
output.writeMessage(2, targets_.get(i));
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeMessage(3, source_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeEnum(4, stage_.getNumber());
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt32(5, pipelineSize_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeUInt64(6, minBytesRcvd_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeUInt64(7, maxBytesRcvd_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeUInt64(8, latestGenerationStamp_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
output.writeMessage(9, requestedChecksum_);
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
output.writeMessage(10, cachingStrategy_);
}
if (((bitField0_ & 0x00000200) == 0x00000200)) {
output.writeEnum(11, storageType_.getNumber());
}
for (int i = 0; i < targetStorageTypes_.size(); i++) {
output.writeEnum(12, targetStorageTypes_.get(i).getNumber());
}
if (((bitField0_ & 0x00000400) == 0x00000400)) {
output.writeBool(13, allowLazyPersist_);
}
if (((bitField0_ & 0x00000800) == 0x00000800)) {
output.writeBool(14, pinning_);
}
for (int i = 0; i < targetPinnings_.size(); i++) {
output.writeBool(15, targetPinnings_.get(i));
}
if (((bitField0_ & 0x00001000) == 0x00001000)) {
output.writeBytes(16, getStorageIdBytes());
}
for (int i = 0; i < targetStorageIds_.size(); i++) {
output.writeBytes(17, targetStorageIds_.getByteString(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, header_);
}
for (int i = 0; i < targets_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, targets_.get(i));
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, source_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(4, stage_.getNumber());
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(5, pipelineSize_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(6, minBytesRcvd_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(7, maxBytesRcvd_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(8, latestGenerationStamp_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(9, requestedChecksum_);
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(10, cachingStrategy_);
}
if (((bitField0_ & 0x00000200) == 0x00000200)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(11, storageType_.getNumber());
}
{
int dataSize = 0;
for (int i = 0; i < targetStorageTypes_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeEnumSizeNoTag(targetStorageTypes_.get(i).getNumber());
}
size += dataSize;
size += 1 * targetStorageTypes_.size();
}
if (((bitField0_ & 0x00000400) == 0x00000400)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(13, allowLazyPersist_);
}
if (((bitField0_ & 0x00000800) == 0x00000800)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(14, pinning_);
}
{
int dataSize = 0;
dataSize = 1 * getTargetPinningsList().size();
size += dataSize;
size += 1 * getTargetPinningsList().size();
}
if (((bitField0_ & 0x00001000) == 0x00001000)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(16, getStorageIdBytes());
}
{
int dataSize = 0;
for (int i = 0; i < targetStorageIds_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeBytesSizeNoTag(targetStorageIds_.getByteString(i));
}
size += dataSize;
size += 2 * getTargetStorageIdsList().size();
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
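// Sizing note (editorial addition): the per-element tag overhead above depends
// on the field number, because a tag is itself a varint -- fields 1-15 fit in
// one tag byte, fields 16 and up need two. Hence targetPinnings (field 15,
// tag 120) contributes 1 * list.size() tag bytes, while targetStorageIds
// (field 17, tag (17 << 3) | 2 = 138) contributes 2 * list.size().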
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto) obj;
boolean result = true;
result = result && (hasHeader() == other.hasHeader());
if (hasHeader()) {
result = result && getHeader()
.equals(other.getHeader());
}
result = result && getTargetsList()
.equals(other.getTargetsList());
result = result && (hasSource() == other.hasSource());
if (hasSource()) {
result = result && getSource()
.equals(other.getSource());
}
result = result && (hasStage() == other.hasStage());
if (hasStage()) {
result = result &&
(getStage() == other.getStage());
}
result = result && (hasPipelineSize() == other.hasPipelineSize());
if (hasPipelineSize()) {
result = result && (getPipelineSize()
== other.getPipelineSize());
}
result = result && (hasMinBytesRcvd() == other.hasMinBytesRcvd());
if (hasMinBytesRcvd()) {
result = result && (getMinBytesRcvd()
== other.getMinBytesRcvd());
}
result = result && (hasMaxBytesRcvd() == other.hasMaxBytesRcvd());
if (hasMaxBytesRcvd()) {
result = result && (getMaxBytesRcvd()
== other.getMaxBytesRcvd());
}
result = result && (hasLatestGenerationStamp() == other.hasLatestGenerationStamp());
if (hasLatestGenerationStamp()) {
result = result && (getLatestGenerationStamp()
== other.getLatestGenerationStamp());
}
result = result && (hasRequestedChecksum() == other.hasRequestedChecksum());
if (hasRequestedChecksum()) {
result = result && getRequestedChecksum()
.equals(other.getRequestedChecksum());
}
result = result && (hasCachingStrategy() == other.hasCachingStrategy());
if (hasCachingStrategy()) {
result = result && getCachingStrategy()
.equals(other.getCachingStrategy());
}
result = result && (hasStorageType() == other.hasStorageType());
if (hasStorageType()) {
result = result &&
(getStorageType() == other.getStorageType());
}
result = result && getTargetStorageTypesList()
.equals(other.getTargetStorageTypesList());
result = result && (hasAllowLazyPersist() == other.hasAllowLazyPersist());
if (hasAllowLazyPersist()) {
result = result && (getAllowLazyPersist()
== other.getAllowLazyPersist());
}
result = result && (hasPinning() == other.hasPinning());
if (hasPinning()) {
result = result && (getPinning()
== other.getPinning());
}
result = result && getTargetPinningsList()
.equals(other.getTargetPinningsList());
result = result && (hasStorageId() == other.hasStorageId());
if (hasStorageId()) {
result = result && getStorageId()
.equals(other.getStorageId());
}
result = result && getTargetStorageIdsList()
.equals(other.getTargetStorageIdsList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasHeader()) {
hash = (37 * hash) + HEADER_FIELD_NUMBER;
hash = (53 * hash) + getHeader().hashCode();
}
if (getTargetsCount() > 0) {
hash = (37 * hash) + TARGETS_FIELD_NUMBER;
hash = (53 * hash) + getTargetsList().hashCode();
}
if (hasSource()) {
hash = (37 * hash) + SOURCE_FIELD_NUMBER;
hash = (53 * hash) + getSource().hashCode();
}
if (hasStage()) {
hash = (37 * hash) + STAGE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getStage());
}
if (hasPipelineSize()) {
hash = (37 * hash) + PIPELINESIZE_FIELD_NUMBER;
hash = (53 * hash) + getPipelineSize();
}
if (hasMinBytesRcvd()) {
hash = (37 * hash) + MINBYTESRCVD_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getMinBytesRcvd());
}
if (hasMaxBytesRcvd()) {
hash = (37 * hash) + MAXBYTESRCVD_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getMaxBytesRcvd());
}
if (hasLatestGenerationStamp()) {
hash = (37 * hash) + LATESTGENERATIONSTAMP_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getLatestGenerationStamp());
}
if (hasRequestedChecksum()) {
hash = (37 * hash) + REQUESTEDCHECKSUM_FIELD_NUMBER;
hash = (53 * hash) + getRequestedChecksum().hashCode();
}
if (hasCachingStrategy()) {
hash = (37 * hash) + CACHINGSTRATEGY_FIELD_NUMBER;
hash = (53 * hash) + getCachingStrategy().hashCode();
}
if (hasStorageType()) {
hash = (37 * hash) + STORAGETYPE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getStorageType());
}
if (getTargetStorageTypesCount() > 0) {
hash = (37 * hash) + TARGETSTORAGETYPES_FIELD_NUMBER;
hash = (53 * hash) + hashEnumList(getTargetStorageTypesList());
}
if (hasAllowLazyPersist()) {
hash = (37 * hash) + ALLOWLAZYPERSIST_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getAllowLazyPersist());
}
if (hasPinning()) {
hash = (37 * hash) + PINNING_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getPinning());
}
if (getTargetPinningsCount() > 0) {
hash = (37 * hash) + TARGETPINNINGS_FIELD_NUMBER;
hash = (53 * hash) + getTargetPinningsList().hashCode();
}
if (hasStorageId()) {
hash = (37 * hash) + STORAGEID_FIELD_NUMBER;
hash = (53 * hash) + getStorageId().hashCode();
}
if (getTargetStorageIdsCount() > 0) {
hash = (37 * hash) + TARGETSTORAGEIDS_FIELD_NUMBER;
hash = (53 * hash) + getTargetStorageIdsList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
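// Note (editorial addition): hashCode() memoizes its result and treats 0 as
// the "not yet computed" sentinel, so a message whose hash genuinely equals 0
// is recomputed on every call. Harmless, just never cached.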
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.OpWriteBlockProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpWriteBlockProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpWriteBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getHeaderFieldBuilder();
getTargetsFieldBuilder();
getSourceFieldBuilder();
getRequestedChecksumFieldBuilder();
getCachingStrategyFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (headerBuilder_ == null) {
header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
if (targetsBuilder_ == null) {
targets_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
} else {
targetsBuilder_.clear();
}
if (sourceBuilder_ == null) {
source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance();
} else {
sourceBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
stage_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_APPEND;
bitField0_ = (bitField0_ & ~0x00000008);
pipelineSize_ = 0;
bitField0_ = (bitField0_ & ~0x00000010);
minBytesRcvd_ = 0L;
bitField0_ = (bitField0_ & ~0x00000020);
maxBytesRcvd_ = 0L;
bitField0_ = (bitField0_ & ~0x00000040);
latestGenerationStamp_ = 0L;
bitField0_ = (bitField0_ & ~0x00000080);
if (requestedChecksumBuilder_ == null) {
requestedChecksum_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance();
} else {
requestedChecksumBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000100);
if (cachingStrategyBuilder_ == null) {
cachingStrategy_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance();
} else {
cachingStrategyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000200);
storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
bitField0_ = (bitField0_ & ~0x00000400);
targetStorageTypes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000800);
allowLazyPersist_ = false;
bitField0_ = (bitField0_ & ~0x00001000);
pinning_ = false;
bitField0_ = (bitField0_ & ~0x00002000);
targetPinnings_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00004000);
storageId_ = "";
bitField0_ = (bitField0_ & ~0x00008000);
targetStorageIds_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00010000);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpWriteBlockProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (headerBuilder_ == null) {
result.header_ = header_;
} else {
result.header_ = headerBuilder_.build();
}
if (targetsBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002)) {
targets_ = java.util.Collections.unmodifiableList(targets_);
bitField0_ = (bitField0_ & ~0x00000002);
}
result.targets_ = targets_;
} else {
result.targets_ = targetsBuilder_.build();
}
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000002;
}
if (sourceBuilder_ == null) {
result.source_ = source_;
} else {
result.source_ = sourceBuilder_.build();
}
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000004;
}
result.stage_ = stage_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000008;
}
result.pipelineSize_ = pipelineSize_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000010;
}
result.minBytesRcvd_ = minBytesRcvd_;
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000020;
}
result.maxBytesRcvd_ = maxBytesRcvd_;
if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
to_bitField0_ |= 0x00000040;
}
result.latestGenerationStamp_ = latestGenerationStamp_;
if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
to_bitField0_ |= 0x00000080;
}
if (requestedChecksumBuilder_ == null) {
result.requestedChecksum_ = requestedChecksum_;
} else {
result.requestedChecksum_ = requestedChecksumBuilder_.build();
}
if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
to_bitField0_ |= 0x00000100;
}
if (cachingStrategyBuilder_ == null) {
result.cachingStrategy_ = cachingStrategy_;
} else {
result.cachingStrategy_ = cachingStrategyBuilder_.build();
}
if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
to_bitField0_ |= 0x00000200;
}
result.storageType_ = storageType_;
if (((bitField0_ & 0x00000800) == 0x00000800)) {
targetStorageTypes_ = java.util.Collections.unmodifiableList(targetStorageTypes_);
bitField0_ = (bitField0_ & ~0x00000800);
}
result.targetStorageTypes_ = targetStorageTypes_;
if (((from_bitField0_ & 0x00001000) == 0x00001000)) {
to_bitField0_ |= 0x00000400;
}
result.allowLazyPersist_ = allowLazyPersist_;
if (((from_bitField0_ & 0x00002000) == 0x00002000)) {
to_bitField0_ |= 0x00000800;
}
result.pinning_ = pinning_;
if (((bitField0_ & 0x00004000) == 0x00004000)) {
targetPinnings_ = java.util.Collections.unmodifiableList(targetPinnings_);
bitField0_ = (bitField0_ & ~0x00004000);
}
result.targetPinnings_ = targetPinnings_;
if (((from_bitField0_ & 0x00008000) == 0x00008000)) {
to_bitField0_ |= 0x00001000;
}
result.storageId_ = storageId_;
if (((bitField0_ & 0x00010000) == 0x00010000)) {
targetStorageIds_ = new com.google.protobuf.UnmodifiableLazyStringList(
targetStorageIds_);
bitField0_ = (bitField0_ & ~0x00010000);
}
result.targetStorageIds_ = targetStorageIds_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.getDefaultInstance()) return this;
if (other.hasHeader()) {
mergeHeader(other.getHeader());
}
if (targetsBuilder_ == null) {
if (!other.targets_.isEmpty()) {
if (targets_.isEmpty()) {
targets_ = other.targets_;
bitField0_ = (bitField0_ & ~0x00000002);
} else {
ensureTargetsIsMutable();
targets_.addAll(other.targets_);
}
onChanged();
}
} else {
if (!other.targets_.isEmpty()) {
if (targetsBuilder_.isEmpty()) {
targetsBuilder_.dispose();
targetsBuilder_ = null;
targets_ = other.targets_;
bitField0_ = (bitField0_ & ~0x00000002);
targetsBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getTargetsFieldBuilder() : null;
} else {
targetsBuilder_.addAllMessages(other.targets_);
}
}
}
if (other.hasSource()) {
mergeSource(other.getSource());
}
if (other.hasStage()) {
setStage(other.getStage());
}
if (other.hasPipelineSize()) {
setPipelineSize(other.getPipelineSize());
}
if (other.hasMinBytesRcvd()) {
setMinBytesRcvd(other.getMinBytesRcvd());
}
if (other.hasMaxBytesRcvd()) {
setMaxBytesRcvd(other.getMaxBytesRcvd());
}
if (other.hasLatestGenerationStamp()) {
setLatestGenerationStamp(other.getLatestGenerationStamp());
}
if (other.hasRequestedChecksum()) {
mergeRequestedChecksum(other.getRequestedChecksum());
}
if (other.hasCachingStrategy()) {
mergeCachingStrategy(other.getCachingStrategy());
}
if (other.hasStorageType()) {
setStorageType(other.getStorageType());
}
if (!other.targetStorageTypes_.isEmpty()) {
if (targetStorageTypes_.isEmpty()) {
targetStorageTypes_ = other.targetStorageTypes_;
bitField0_ = (bitField0_ & ~0x00000800);
} else {
ensureTargetStorageTypesIsMutable();
targetStorageTypes_.addAll(other.targetStorageTypes_);
}
onChanged();
}
if (other.hasAllowLazyPersist()) {
setAllowLazyPersist(other.getAllowLazyPersist());
}
if (other.hasPinning()) {
setPinning(other.getPinning());
}
if (!other.targetPinnings_.isEmpty()) {
if (targetPinnings_.isEmpty()) {
targetPinnings_ = other.targetPinnings_;
bitField0_ = (bitField0_ & ~0x00004000);
} else {
ensureTargetPinningsIsMutable();
targetPinnings_.addAll(other.targetPinnings_);
}
onChanged();
}
if (other.hasStorageId()) {
bitField0_ |= 0x00008000;
storageId_ = other.storageId_;
onChanged();
}
if (!other.targetStorageIds_.isEmpty()) {
if (targetStorageIds_.isEmpty()) {
targetStorageIds_ = other.targetStorageIds_;
bitField0_ = (bitField0_ & ~0x00010000);
} else {
ensureTargetStorageIdsIsMutable();
targetStorageIds_.addAll(other.targetStorageIds_);
}
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasHeader()) {
return false;
}
if (!hasStage()) {
return false;
}
if (!hasPipelineSize()) {
return false;
}
if (!hasMinBytesRcvd()) {
return false;
}
if (!hasMaxBytesRcvd()) {
return false;
}
if (!hasLatestGenerationStamp()) {
return false;
}
if (!hasRequestedChecksum()) {
return false;
}
if (!getHeader().isInitialized()) {
return false;
}
for (int i = 0; i < getTargetsCount(); i++) {
if (!getTargets(i).isInitialized()) {
return false;
}
}
if (hasSource()) {
if (!getSource().isInitialized()) {
return false;
}
}
if (!getRequestedChecksum().isInitialized()) {
return false;
}
return true;
}
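// isInitialized() above mirrors the `required` markers in datatransfer.proto:
// header, stage, pipelineSize, minBytesRcvd, maxBytesRcvd, latestGenerationStamp
// and requestedChecksum must all be set. Consequence for callers (sketch):
//
//   OpWriteBlockProto.Builder b = OpWriteBlockProto.newBuilder();
//   b.buildPartial();  // always returns, possibly an uninitialized message
//   b.build();         // throws UninitializedMessageException until every
//                      // required field listed above has been populated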
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder> headerBuilder_;
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
if (headerBuilder_ == null) {
return header_;
} else {
return headerBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
if (headerBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
header_ = value;
onChanged();
} else {
headerBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public Builder setHeader(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder builderForValue) {
if (headerBuilder_ == null) {
header_ = builderForValue.build();
onChanged();
} else {
headerBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
if (headerBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance()) {
header_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial();
} else {
header_ = value;
}
onChanged();
} else {
headerBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public Builder clearHeader() {
if (headerBuilder_ == null) {
header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
onChanged();
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder getHeaderBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getHeaderFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
if (headerBuilder_ != null) {
return headerBuilder_.getMessageOrBuilder();
} else {
return header_;
}
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>
getHeaderFieldBuilder() {
if (headerBuilder_ == null) {
headerBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>(
header_,
getParentForChildren(),
isClean());
header_ = null;
}
return headerBuilder_;
}
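// Design note: the SingleFieldBuilder above is created lazily. Until a caller
// asks for getHeaderBuilder(), this Builder mutates the plain header_ message;
// afterwards ownership transfers to headerBuilder_ and header_ is nulled out.
// Sketch (clientName comes from ClientOperationHeaderProto's declaration in
// datatransfer.proto; the value is a placeholder):
//
//   OpWriteBlockProto.Builder b = OpWriteBlockProto.newBuilder();
//   b.getHeaderBuilder()          // forces creation of headerBuilder_
//       .setClientName("demo");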
// repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> targets_ =
java.util.Collections.emptyList();
private void ensureTargetsIsMutable() {
if (!((bitField0_ & 0x00000002) == 0x00000002)) {
targets_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>(targets_);
bitField0_ |= 0x00000002;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> targetsBuilder_;
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getTargetsList() {
if (targetsBuilder_ == null) {
return java.util.Collections.unmodifiableList(targets_);
} else {
return targetsBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public int getTargetsCount() {
if (targetsBuilder_ == null) {
return targets_.size();
} else {
return targetsBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index) {
if (targetsBuilder_ == null) {
return targets_.get(index);
} else {
return targetsBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder setTargets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (targetsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetsIsMutable();
targets_.set(index, value);
onChanged();
} else {
targetsBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder setTargets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.set(index, builderForValue.build());
onChanged();
} else {
targetsBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder addTargets(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (targetsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetsIsMutable();
targets_.add(value);
onChanged();
} else {
targetsBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder addTargets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (targetsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetsIsMutable();
targets_.add(index, value);
onChanged();
} else {
targetsBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder addTargets(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.add(builderForValue.build());
onChanged();
} else {
targetsBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder addTargets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.add(index, builderForValue.build());
onChanged();
} else {
targetsBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder addAllTargets(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> values) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
super.addAll(values, targets_);
onChanged();
} else {
targetsBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder clearTargets() {
if (targetsBuilder_ == null) {
targets_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
} else {
targetsBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder removeTargets(int index) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.remove(index);
onChanged();
} else {
targetsBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getTargetsBuilder(
int index) {
return getTargetsFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
int index) {
if (targetsBuilder_ == null) {
return targets_.get(index); } else {
return targetsBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getTargetsOrBuilderList() {
if (targetsBuilder_ != null) {
return targetsBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(targets_);
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addTargetsBuilder() {
return getTargetsFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addTargetsBuilder(
int index) {
return getTargetsFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder>
getTargetsBuilderList() {
return getTargetsFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getTargetsFieldBuilder() {
if (targetsBuilder_ == null) {
targetsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>(
targets_,
((bitField0_ & 0x00000002) == 0x00000002),
getParentForChildren(),
isClean());
targets_ = null;
}
return targetsBuilder_;
}
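// The repeated-field analogue of the pattern above: targets_ is used directly
// until getTargetsFieldBuilder() is first invoked, after which the
// RepeatedFieldBuilder owns the element list. Sketch (`someDatanodeId` is a
// hypothetical prebuilt DatanodeIDProto, per DatanodeInfoProto in hdfs.proto):
//
//   OpWriteBlockProto.Builder b = OpWriteBlockProto.newBuilder();
//   b.addTargetsBuilder()         // switches from targets_ to targetsBuilder_
//       .setId(someDatanodeId);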
// optional .hadoop.hdfs.DatanodeInfoProto source = 3;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> sourceBuilder_;
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public boolean hasSource() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource() {
if (sourceBuilder_ == null) {
return source_;
} else {
return sourceBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public Builder setSource(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (sourceBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
source_ = value;
onChanged();
} else {
sourceBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public Builder setSource(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (sourceBuilder_ == null) {
source_ = builderForValue.build();
onChanged();
} else {
sourceBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public Builder mergeSource(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (sourceBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004) &&
source_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()) {
source_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder(source_).mergeFrom(value).buildPartial();
} else {
source_ = value;
}
onChanged();
} else {
sourceBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public Builder clearSource() {
if (sourceBuilder_ == null) {
source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance();
onChanged();
} else {
sourceBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getSourceBuilder() {
bitField0_ |= 0x00000004;
onChanged();
return getSourceFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder() {
if (sourceBuilder_ != null) {
return sourceBuilder_.getMessageOrBuilder();
} else {
return source_;
}
}
/**
* optional .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getSourceFieldBuilder() {
if (sourceBuilder_ == null) {
sourceBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>(
source_,
getParentForChildren(),
isClean());
source_ = null;
}
return sourceBuilder_;
}
// required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage stage_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_APPEND;
/**
* required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;
*/
public boolean hasStage() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage getStage() {
return stage_;
}
/**
* required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;
*/
public Builder setStage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
stage_ = value;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;
*/
public Builder clearStage() {
bitField0_ = (bitField0_ & ~0x00000008);
stage_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_APPEND;
onChanged();
return this;
}
// required uint32 pipelineSize = 5;
private int pipelineSize_ ;
/**
* required uint32 pipelineSize = 5;
*/
public boolean hasPipelineSize() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* required uint32 pipelineSize = 5;
*/
public int getPipelineSize() {
return pipelineSize_;
}
/**
* required uint32 pipelineSize = 5;
*/
public Builder setPipelineSize(int value) {
bitField0_ |= 0x00000010;
pipelineSize_ = value;
onChanged();
return this;
}
/**
* required uint32 pipelineSize = 5;
*/
public Builder clearPipelineSize() {
bitField0_ = (bitField0_ & ~0x00000010);
pipelineSize_ = 0;
onChanged();
return this;
}
// required uint64 minBytesRcvd = 6;
private long minBytesRcvd_ ;
/**
* required uint64 minBytesRcvd = 6;
*/
public boolean hasMinBytesRcvd() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* required uint64 minBytesRcvd = 6;
*/
public long getMinBytesRcvd() {
return minBytesRcvd_;
}
/**
* required uint64 minBytesRcvd = 6;
*/
public Builder setMinBytesRcvd(long value) {
bitField0_ |= 0x00000020;
minBytesRcvd_ = value;
onChanged();
return this;
}
/**
* required uint64 minBytesRcvd = 6;
*/
public Builder clearMinBytesRcvd() {
bitField0_ = (bitField0_ & ~0x00000020);
minBytesRcvd_ = 0L;
onChanged();
return this;
}
// required uint64 maxBytesRcvd = 7;
private long maxBytesRcvd_ ;
/**
* required uint64 maxBytesRcvd = 7;
*/
public boolean hasMaxBytesRcvd() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* required uint64 maxBytesRcvd = 7;
*/
public long getMaxBytesRcvd() {
return maxBytesRcvd_;
}
/**
* required uint64 maxBytesRcvd = 7;
*/
public Builder setMaxBytesRcvd(long value) {
bitField0_ |= 0x00000040;
maxBytesRcvd_ = value;
onChanged();
return this;
}
/**
* required uint64 maxBytesRcvd = 7;
*/
public Builder clearMaxBytesRcvd() {
bitField0_ = (bitField0_ & ~0x00000040);
maxBytesRcvd_ = 0L;
onChanged();
return this;
}
// required uint64 latestGenerationStamp = 8;
private long latestGenerationStamp_ ;
/**
* required uint64 latestGenerationStamp = 8;
*/
public boolean hasLatestGenerationStamp() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* required uint64 latestGenerationStamp = 8;
*/
public long getLatestGenerationStamp() {
return latestGenerationStamp_;
}
/**
* required uint64 latestGenerationStamp = 8;
*/
public Builder setLatestGenerationStamp(long value) {
bitField0_ |= 0x00000080;
latestGenerationStamp_ = value;
onChanged();
return this;
}
/**
* required uint64 latestGenerationStamp = 8;
*/
public Builder clearLatestGenerationStamp() {
bitField0_ = (bitField0_ & ~0x00000080);
latestGenerationStamp_ = 0L;
onChanged();
return this;
}
// required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto requestedChecksum_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder> requestedChecksumBuilder_;
/**
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*
*
**
* The requested checksum mechanism for this block write.
*
*/
public boolean hasRequestedChecksum() {
return ((bitField0_ & 0x00000100) == 0x00000100);
}
/**
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*
*
**
* The requested checksum mechanism for this block write.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getRequestedChecksum() {
if (requestedChecksumBuilder_ == null) {
return requestedChecksum_;
} else {
return requestedChecksumBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*
*
**
* The requested checksum mechanism for this block write.
*
*/
public Builder setRequestedChecksum(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto value) {
if (requestedChecksumBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
requestedChecksum_ = value;
onChanged();
} else {
requestedChecksumBuilder_.setMessage(value);
}
bitField0_ |= 0x00000100;
return this;
}
/**
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*
*
**
* The requested checksum mechanism for this block write.
*
*/
public Builder setRequestedChecksum(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder builderForValue) {
if (requestedChecksumBuilder_ == null) {
requestedChecksum_ = builderForValue.build();
onChanged();
} else {
requestedChecksumBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000100;
return this;
}
/**
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*
*
**
* The requested checksum mechanism for this block write.
*
*/
public Builder mergeRequestedChecksum(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto value) {
if (requestedChecksumBuilder_ == null) {
if (((bitField0_ & 0x00000100) == 0x00000100) &&
requestedChecksum_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance()) {
requestedChecksum_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.newBuilder(requestedChecksum_).mergeFrom(value).buildPartial();
} else {
requestedChecksum_ = value;
}
onChanged();
} else {
requestedChecksumBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000100;
return this;
}
/**
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*
*
**
* The requested checksum mechanism for this block write.
*
*/
public Builder clearRequestedChecksum() {
if (requestedChecksumBuilder_ == null) {
requestedChecksum_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance();
onChanged();
} else {
requestedChecksumBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000100);
return this;
}
/**
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*
*
**
* The requested checksum mechanism for this block write.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder getRequestedChecksumBuilder() {
bitField0_ |= 0x00000100;
onChanged();
return getRequestedChecksumFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*
*
**
* The requested checksum mechanism for this block write.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getRequestedChecksumOrBuilder() {
if (requestedChecksumBuilder_ != null) {
return requestedChecksumBuilder_.getMessageOrBuilder();
} else {
return requestedChecksum_;
}
}
/**
* required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;
*
*
**
* The requested checksum mechanism for this block write.
*
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder>
getRequestedChecksumFieldBuilder() {
if (requestedChecksumBuilder_ == null) {
requestedChecksumBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder>(
requestedChecksum_,
getParentForChildren(),
isClean());
requestedChecksum_ = null;
}
return requestedChecksumBuilder_;
}
// optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto cachingStrategy_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder> cachingStrategyBuilder_;
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
public boolean hasCachingStrategy() {
return ((bitField0_ & 0x00000200) == 0x00000200);
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto getCachingStrategy() {
if (cachingStrategyBuilder_ == null) {
return cachingStrategy_;
} else {
return cachingStrategyBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
public Builder setCachingStrategy(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto value) {
if (cachingStrategyBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
cachingStrategy_ = value;
onChanged();
} else {
cachingStrategyBuilder_.setMessage(value);
}
bitField0_ |= 0x00000200;
return this;
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
public Builder setCachingStrategy(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder builderForValue) {
if (cachingStrategyBuilder_ == null) {
cachingStrategy_ = builderForValue.build();
onChanged();
} else {
cachingStrategyBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000200;
return this;
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
public Builder mergeCachingStrategy(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto value) {
if (cachingStrategyBuilder_ == null) {
if (((bitField0_ & 0x00000200) == 0x00000200) &&
cachingStrategy_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance()) {
cachingStrategy_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.newBuilder(cachingStrategy_).mergeFrom(value).buildPartial();
} else {
cachingStrategy_ = value;
}
onChanged();
} else {
cachingStrategyBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000200;
return this;
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
public Builder clearCachingStrategy() {
if (cachingStrategyBuilder_ == null) {
cachingStrategy_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance();
onChanged();
} else {
cachingStrategyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000200);
return this;
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder getCachingStrategyBuilder() {
bitField0_ |= 0x00000200;
onChanged();
return getCachingStrategyFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder getCachingStrategyOrBuilder() {
if (cachingStrategyBuilder_ != null) {
return cachingStrategyBuilder_.getMessageOrBuilder();
} else {
return cachingStrategy_;
}
}
/**
* optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder>
getCachingStrategyFieldBuilder() {
if (cachingStrategyBuilder_ == null) {
cachingStrategyBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder>(
cachingStrategy_,
getParentForChildren(),
isClean());
cachingStrategy_ = null;
}
return cachingStrategyBuilder_;
}
// optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];
*/
public boolean hasStorageType() {
return ((bitField0_ & 0x00000400) == 0x00000400);
}
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
return storageType_;
}
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];
*/
public Builder setStorageType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000400;
storageType_ = value;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];
*/
public Builder clearStorageType() {
bitField0_ = (bitField0_ & ~0x00000400);
storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
onChanged();
return this;
}
// repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> targetStorageTypes_ =
java.util.Collections.emptyList();
private void ensureTargetStorageTypesIsMutable() {
if (!((bitField0_ & 0x00000800) == 0x00000800)) {
targetStorageTypes_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(targetStorageTypes_);
bitField0_ |= 0x00000800;
}
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getTargetStorageTypesList() {
return java.util.Collections.unmodifiableList(targetStorageTypes_);
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
*/
public int getTargetStorageTypesCount() {
return targetStorageTypes_.size();
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getTargetStorageTypes(int index) {
return targetStorageTypes_.get(index);
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
*/
public Builder setTargetStorageTypes(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetStorageTypesIsMutable();
targetStorageTypes_.set(index, value);
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
*/
public Builder addTargetStorageTypes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetStorageTypesIsMutable();
targetStorageTypes_.add(value);
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
*/
public Builder addAllTargetStorageTypes(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> values) {
ensureTargetStorageTypesIsMutable();
super.addAll(values, targetStorageTypes_);
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;
*/
public Builder clearTargetStorageTypes() {
targetStorageTypes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000800);
onChanged();
return this;
}
// optional bool allowLazyPersist = 13 [default = false];
private boolean allowLazyPersist_ ;
/**
* optional bool allowLazyPersist = 13 [default = false];
*
*
**
* Hint to the DataNode that the block can be allocated on transient
* storage i.e. memory and written to disk lazily. The DataNode is free
* to ignore this hint.
*
*/
public boolean hasAllowLazyPersist() {
return ((bitField0_ & 0x00001000) == 0x00001000);
}
/**
* optional bool allowLazyPersist = 13 [default = false];
*
*
**
* Hint to the DataNode that the block can be allocated on transient
* storage i.e. memory and written to disk lazily. The DataNode is free
* to ignore this hint.
*
*/
public boolean getAllowLazyPersist() {
return allowLazyPersist_;
}
/**
* optional bool allowLazyPersist = 13 [default = false];
*
*
**
* Hint to the DataNode that the block can be allocated on transient
* storage i.e. memory and written to disk lazily. The DataNode is free
* to ignore this hint.
*
*/
public Builder setAllowLazyPersist(boolean value) {
bitField0_ |= 0x00001000;
allowLazyPersist_ = value;
onChanged();
return this;
}
/**
* optional bool allowLazyPersist = 13 [default = false];
*
*
**
* Hint to the DataNode that the block can be allocated on transient
* storage i.e. memory and written to disk lazily. The DataNode is free
* to ignore this hint.
*
*/
public Builder clearAllowLazyPersist() {
bitField0_ = (bitField0_ & ~0x00001000);
allowLazyPersist_ = false;
onChanged();
return this;
}
// optional bool pinning = 14 [default = false];
private boolean pinning_ ;
/**
* optional bool pinning = 14 [default = false];
*
*
*whether to pin the block, so Balancer won't move it.
*
*/
public boolean hasPinning() {
return ((bitField0_ & 0x00002000) == 0x00002000);
}
/**
* optional bool pinning = 14 [default = false];
*
*
*whether to pin the block, so Balancer won't move it.
*
*/
public boolean getPinning() {
return pinning_;
}
/**
* optional bool pinning = 14 [default = false];
*
*
*whether to pin the block, so Balancer won't move it.
*
*/
public Builder setPinning(boolean value) {
bitField0_ |= 0x00002000;
pinning_ = value;
onChanged();
return this;
}
/**
* optional bool pinning = 14 [default = false];
*
*
*whether to pin the block, so Balancer won't move it.
*
*/
public Builder clearPinning() {
bitField0_ = (bitField0_ & ~0x00002000);
pinning_ = false;
onChanged();
return this;
}
// repeated bool targetPinnings = 15;
private java.util.List<java.lang.Boolean> targetPinnings_ = java.util.Collections.emptyList();
private void ensureTargetPinningsIsMutable() {
if (!((bitField0_ & 0x00004000) == 0x00004000)) {
targetPinnings_ = new java.util.ArrayList<java.lang.Boolean>(targetPinnings_);
bitField0_ |= 0x00004000;
}
}
/**
* repeated bool targetPinnings = 15;
*/
public java.util.List<java.lang.Boolean>
getTargetPinningsList() {
return java.util.Collections.unmodifiableList(targetPinnings_);
}
/**
* repeated bool targetPinnings = 15;
*/
public int getTargetPinningsCount() {
return targetPinnings_.size();
}
/**
* repeated bool targetPinnings = 15;
*/
public boolean getTargetPinnings(int index) {
return targetPinnings_.get(index);
}
/**
* repeated bool targetPinnings = 15;
*/
public Builder setTargetPinnings(
int index, boolean value) {
ensureTargetPinningsIsMutable();
targetPinnings_.set(index, value);
onChanged();
return this;
}
/**
* repeated bool targetPinnings = 15;
*/
public Builder addTargetPinnings(boolean value) {
ensureTargetPinningsIsMutable();
targetPinnings_.add(value);
onChanged();
return this;
}
/**
* repeated bool targetPinnings = 15;
*/
public Builder addAllTargetPinnings(
java.lang.Iterable<? extends java.lang.Boolean> values) {
ensureTargetPinningsIsMutable();
super.addAll(values, targetPinnings_);
onChanged();
return this;
}
/**
* repeated bool targetPinnings = 15;
*/
public Builder clearTargetPinnings() {
targetPinnings_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00004000);
onChanged();
return this;
}
// optional string storageId = 16;
private java.lang.Object storageId_ = "";
/**
* optional string storageId = 16;
*/
public boolean hasStorageId() {
return ((bitField0_ & 0x00008000) == 0x00008000);
}
/**
* optional string storageId = 16;
*/
public java.lang.String getStorageId() {
java.lang.Object ref = storageId_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
storageId_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string storageId = 16;
*/
public com.google.protobuf.ByteString
getStorageIdBytes() {
java.lang.Object ref = storageId_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
storageId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* optional string storageId = 16;
*/
public Builder setStorageId(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00008000;
storageId_ = value;
onChanged();
return this;
}
/**
* optional string storageId = 16;
*/
public Builder clearStorageId() {
bitField0_ = (bitField0_ & ~0x00008000);
storageId_ = getDefaultInstance().getStorageId();
onChanged();
return this;
}
/**
* optional string storageId = 16;
*/
public Builder setStorageIdBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00008000;
storageId_ = value;
onChanged();
return this;
}
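// Note: storageId_ deliberately holds either a String or a ByteString.
// getStorageId() decodes a ByteString to UTF-8 once and caches the String;
// getStorageIdBytes() caches in the opposite direction. Sketch (the id value
// is a placeholder):
//
//   b.setStorageIdBytes(com.google.protobuf.ByteString.copyFromUtf8("DS-0001"));
//   b.getStorageId();  // decodes once, then returns the cached String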
// repeated string targetStorageIds = 17;
private com.google.protobuf.LazyStringList targetStorageIds_ = com.google.protobuf.LazyStringArrayList.EMPTY;
private void ensureTargetStorageIdsIsMutable() {
if (!((bitField0_ & 0x00010000) == 0x00010000)) {
targetStorageIds_ = new com.google.protobuf.LazyStringArrayList(targetStorageIds_);
bitField0_ |= 0x00010000;
}
}
/**
* repeated string targetStorageIds = 17;
*/
public java.util.List<java.lang.String>
getTargetStorageIdsList() {
return java.util.Collections.unmodifiableList(targetStorageIds_);
}
/**
* repeated string targetStorageIds = 17;
*/
public int getTargetStorageIdsCount() {
return targetStorageIds_.size();
}
/**
* repeated string targetStorageIds = 17;
*/
public java.lang.String getTargetStorageIds(int index) {
return targetStorageIds_.get(index);
}
/**
* repeated string targetStorageIds = 17;
*/
public com.google.protobuf.ByteString
getTargetStorageIdsBytes(int index) {
return targetStorageIds_.getByteString(index);
}
/**
* repeated string targetStorageIds = 17;
*/
public Builder setTargetStorageIds(
int index, java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetStorageIdsIsMutable();
targetStorageIds_.set(index, value);
onChanged();
return this;
}
/**
* repeated string targetStorageIds = 17;
*/
public Builder addTargetStorageIds(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetStorageIdsIsMutable();
targetStorageIds_.add(value);
onChanged();
return this;
}
/**
* repeated string targetStorageIds = 17;
*/
public Builder addAllTargetStorageIds(
java.lang.Iterable<java.lang.String> values) {
ensureTargetStorageIdsIsMutable();
super.addAll(values, targetStorageIds_);
onChanged();
return this;
}
/**
* repeated string targetStorageIds = 17;
*/
public Builder clearTargetStorageIds() {
targetStorageIds_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00010000);
onChanged();
return this;
}
/**
* repeated string targetStorageIds = 17;
*/
public Builder addTargetStorageIdsBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetStorageIdsIsMutable();
targetStorageIds_.add(value);
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpWriteBlockProto)
}
static {
defaultInstance = new OpWriteBlockProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.OpWriteBlockProto)
}
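// Illustrative construction of a complete OpWriteBlockProto via the Builder
// above. All values are placeholders, and `header` / `checksum` are assumed
// to be prebuilt ClientOperationHeaderProto / ChecksumProto instances:
//
//   OpWriteBlockProto op = OpWriteBlockProto.newBuilder()
//       .setHeader(header)
//       .setStage(OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_APPEND)
//       .setPipelineSize(3)
//       .setMinBytesRcvd(0L)
//       .setMaxBytesRcvd(0L)
//       .setLatestGenerationStamp(1001L)
//       .setRequestedChecksum(checksum)
//       .build();  // safe once all required fields are set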
public interface OpTransferBlockProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
boolean hasHeader();
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader();
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder();
// repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>
getTargetsList();
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index);
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
int getTargetsCount();
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getTargetsOrBuilderList();
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
int index);
// repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getTargetStorageTypesList();
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
*/
int getTargetStorageTypesCount();
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getTargetStorageTypes(int index);
// repeated string targetStorageIds = 4;
/**
* repeated string targetStorageIds = 4;
*/
java.util.List<java.lang.String>
getTargetStorageIdsList();
/**
* repeated string targetStorageIds = 4;
*/
int getTargetStorageIdsCount();
/**
* repeated string targetStorageIds = 4;
*/
java.lang.String getTargetStorageIds(int index);
/**
* repeated string targetStorageIds = 4;
*/
com.google.protobuf.ByteString
getTargetStorageIdsBytes(int index);
}
/**
* Protobuf type {@code hadoop.hdfs.OpTransferBlockProto}
*/
public static final class OpTransferBlockProto extends
com.google.protobuf.GeneratedMessage
implements OpTransferBlockProtoOrBuilder {
// Use OpTransferBlockProto.newBuilder() to construct.
private OpTransferBlockProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private OpTransferBlockProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final OpTransferBlockProto defaultInstance;
public static OpTransferBlockProto getDefaultInstance() {
return defaultInstance;
}
public OpTransferBlockProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private OpTransferBlockProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = header_.toBuilder();
}
header_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(header_);
header_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
targets_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>();
mutable_bitField0_ |= 0x00000002;
}
targets_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.PARSER, extensionRegistry));
break;
}
case 24: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(3, rawValue);
} else {
if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
targetStorageTypes_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>();
mutable_bitField0_ |= 0x00000004;
}
targetStorageTypes_.add(value);
}
break;
}
case 26: {
int length = input.readRawVarint32();
int oldLimit = input.pushLimit(length);
while(input.getBytesUntilLimit() > 0) {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(3, rawValue);
} else {
if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
targetStorageTypes_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>();
mutable_bitField0_ |= 0x00000004;
}
targetStorageTypes_.add(value);
}
}
input.popLimit(oldLimit);
break;
}
case 34: {
if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
targetStorageIds_ = new com.google.protobuf.LazyStringArrayList();
mutable_bitField0_ |= 0x00000008;
}
targetStorageIds_.add(input.readBytes());
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
targets_ = java.util.Collections.unmodifiableList(targets_);
}
if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
targetStorageTypes_ = java.util.Collections.unmodifiableList(targetStorageTypes_);
}
if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
targetStorageIds_ = new com.google.protobuf.UnmodifiableLazyStringList(targetStorageIds_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
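// Note on the tag switch above: field 3 (targetStorageTypes) is accepted in
// both wire encodings of a repeated enum, since tag = (fieldNumber << 3) | wireType:
//
//   (3 << 3) | 0  == 24  // VARINT: one unpacked enum value per tag
//   (3 << 3) | 2  == 26  // LENGTH_DELIMITED: packed run of varints, decoded
//                        // inside the pushLimit/popLimit loop
//
// Unrecognized enum numbers are preserved via unknownFields.mergeVarintField(3, rawValue).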
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpTransferBlockProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpTransferBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.Builder.class);
}
public static com.google.protobuf.Parser<OpTransferBlockProto> PARSER =
new com.google.protobuf.AbstractParser<OpTransferBlockProto>() {
public OpTransferBlockProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new OpTransferBlockProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<OpTransferBlockProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
public static final int HEADER_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_;
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
return header_;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
return header_;
}
// repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
public static final int TARGETS_FIELD_NUMBER = 2;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> targets_;
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getTargetsList() {
return targets_;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getTargetsOrBuilderList() {
return targets_;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public int getTargetsCount() {
return targets_.size();
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index) {
return targets_.get(index);
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
int index) {
return targets_.get(index);
}
// repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
public static final int TARGETSTORAGETYPES_FIELD_NUMBER = 3;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> targetStorageTypes_;
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getTargetStorageTypesList() {
return targetStorageTypes_;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
*/
public int getTargetStorageTypesCount() {
return targetStorageTypes_.size();
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getTargetStorageTypes(int index) {
return targetStorageTypes_.get(index);
}
// repeated string targetStorageIds = 4;
public static final int TARGETSTORAGEIDS_FIELD_NUMBER = 4;
private com.google.protobuf.LazyStringList targetStorageIds_;
/**
* repeated string targetStorageIds = 4;
*/
public java.util.List<java.lang.String>
getTargetStorageIdsList() {
return targetStorageIds_;
}
/**
* repeated string targetStorageIds = 4;
*/
public int getTargetStorageIdsCount() {
return targetStorageIds_.size();
}
/**
* repeated string targetStorageIds = 4;
*/
public java.lang.String getTargetStorageIds(int index) {
return targetStorageIds_.get(index);
}
/**
* repeated string targetStorageIds = 4;
*/
public com.google.protobuf.ByteString
getTargetStorageIdsBytes(int index) {
return targetStorageIds_.getByteString(index);
}
private void initFields() {
header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
targets_ = java.util.Collections.emptyList();
targetStorageTypes_ = java.util.Collections.emptyList();
targetStorageIds_ = com.google.protobuf.LazyStringArrayList.EMPTY;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasHeader()) {
memoizedIsInitialized = 0;
return false;
}
if (!getHeader().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getTargetsCount(); i++) {
if (!getTargets(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, header_);
}
for (int i = 0; i < targets_.size(); i++) {
output.writeMessage(2, targets_.get(i));
}
for (int i = 0; i < targetStorageTypes_.size(); i++) {
output.writeEnum(3, targetStorageTypes_.get(i).getNumber());
}
for (int i = 0; i < targetStorageIds_.size(); i++) {
output.writeBytes(4, targetStorageIds_.getByteString(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, header_);
}
for (int i = 0; i < targets_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, targets_.get(i));
}
{
int dataSize = 0;
for (int i = 0; i < targetStorageTypes_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeEnumSizeNoTag(targetStorageTypes_.get(i).getNumber());
}
size += dataSize;
size += 1 * targetStorageTypes_.size();
}
{
int dataSize = 0;
for (int i = 0; i < targetStorageIds_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeBytesSizeNoTag(targetStorageIds_.getByteString(i));
}
size += dataSize;
size += 1 * getTargetStorageIdsList().size();
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
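// Reader's note (not part of the generated source): the `1 * size()` terms
// above account for the tag byte written before every element of the unpacked
// repeated fields. Field numbers 3 and 4 produce tags 24 (0x18) and 34 (0x22),
// both of which fit in a single varint byte, so the per-element tag cost is
// the constant 1 on top of the NoTag payload sizes summed in each loop.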
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto) obj;
boolean result = true;
result = result && (hasHeader() == other.hasHeader());
if (hasHeader()) {
result = result && getHeader()
.equals(other.getHeader());
}
result = result && getTargetsList()
.equals(other.getTargetsList());
result = result && getTargetStorageTypesList()
.equals(other.getTargetStorageTypesList());
result = result && getTargetStorageIdsList()
.equals(other.getTargetStorageIdsList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasHeader()) {
hash = (37 * hash) + HEADER_FIELD_NUMBER;
hash = (53 * hash) + getHeader().hashCode();
}
if (getTargetsCount() > 0) {
hash = (37 * hash) + TARGETS_FIELD_NUMBER;
hash = (53 * hash) + getTargetsList().hashCode();
}
if (getTargetStorageTypesCount() > 0) {
hash = (37 * hash) + TARGETSTORAGETYPES_FIELD_NUMBER;
hash = (53 * hash) + hashEnumList(getTargetStorageTypesList());
}
if (getTargetStorageIdsCount() > 0) {
hash = (37 * hash) + TARGETSTORAGEIDS_FIELD_NUMBER;
hash = (53 * hash) + getTargetStorageIdsList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.OpTransferBlockProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpTransferBlockProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpTransferBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getHeaderFieldBuilder();
getTargetsFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (headerBuilder_ == null) {
header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
if (targetsBuilder_ == null) {
targets_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
} else {
targetsBuilder_.clear();
}
targetStorageTypes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
targetStorageIds_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpTransferBlockProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (headerBuilder_ == null) {
result.header_ = header_;
} else {
result.header_ = headerBuilder_.build();
}
if (targetsBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002)) {
targets_ = java.util.Collections.unmodifiableList(targets_);
bitField0_ = (bitField0_ & ~0x00000002);
}
result.targets_ = targets_;
} else {
result.targets_ = targetsBuilder_.build();
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
targetStorageTypes_ = java.util.Collections.unmodifiableList(targetStorageTypes_);
bitField0_ = (bitField0_ & ~0x00000004);
}
result.targetStorageTypes_ = targetStorageTypes_;
if (((bitField0_ & 0x00000008) == 0x00000008)) {
targetStorageIds_ = new com.google.protobuf.UnmodifiableLazyStringList(
targetStorageIds_);
bitField0_ = (bitField0_ & ~0x00000008);
}
result.targetStorageIds_ = targetStorageIds_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
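// Reader's note (not part of the generated source): buildPartial() hands the
// builder's backing lists to the new message after wrapping them as
// unmodifiable, then clears the corresponding builder bits. The next mutating
// call on this builder therefore re-copies the list via ensure*IsMutable()
// instead of writing through to the already-built message.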
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.getDefaultInstance()) return this;
if (other.hasHeader()) {
mergeHeader(other.getHeader());
}
if (targetsBuilder_ == null) {
if (!other.targets_.isEmpty()) {
if (targets_.isEmpty()) {
targets_ = other.targets_;
bitField0_ = (bitField0_ & ~0x00000002);
} else {
ensureTargetsIsMutable();
targets_.addAll(other.targets_);
}
onChanged();
}
} else {
if (!other.targets_.isEmpty()) {
if (targetsBuilder_.isEmpty()) {
targetsBuilder_.dispose();
targetsBuilder_ = null;
targets_ = other.targets_;
bitField0_ = (bitField0_ & ~0x00000002);
targetsBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getTargetsFieldBuilder() : null;
} else {
targetsBuilder_.addAllMessages(other.targets_);
}
}
}
if (!other.targetStorageTypes_.isEmpty()) {
if (targetStorageTypes_.isEmpty()) {
targetStorageTypes_ = other.targetStorageTypes_;
bitField0_ = (bitField0_ & ~0x00000004);
} else {
ensureTargetStorageTypesIsMutable();
targetStorageTypes_.addAll(other.targetStorageTypes_);
}
onChanged();
}
if (!other.targetStorageIds_.isEmpty()) {
if (targetStorageIds_.isEmpty()) {
targetStorageIds_ = other.targetStorageIds_;
bitField0_ = (bitField0_ & ~0x00000008);
} else {
ensureTargetStorageIdsIsMutable();
targetStorageIds_.addAll(other.targetStorageIds_);
}
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasHeader()) {
return false;
}
if (!getHeader().isInitialized()) {
return false;
}
for (int i = 0; i < getTargetsCount(); i++) {
if (!getTargets(i).isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder> headerBuilder_;
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
if (headerBuilder_ == null) {
return header_;
} else {
return headerBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
if (headerBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
header_ = value;
onChanged();
} else {
headerBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public Builder setHeader(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder builderForValue) {
if (headerBuilder_ == null) {
header_ = builderForValue.build();
onChanged();
} else {
headerBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
if (headerBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance()) {
header_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial();
} else {
header_ = value;
}
onChanged();
} else {
headerBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public Builder clearHeader() {
if (headerBuilder_ == null) {
header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
onChanged();
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder getHeaderBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getHeaderFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
if (headerBuilder_ != null) {
return headerBuilder_.getMessageOrBuilder();
} else {
return header_;
}
}
/**
* required .hadoop.hdfs.ClientOperationHeaderProto header = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>
getHeaderFieldBuilder() {
if (headerBuilder_ == null) {
headerBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>(
header_,
getParentForChildren(),
isClean());
header_ = null;
}
return headerBuilder_;
}
// repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> targets_ =
java.util.Collections.emptyList();
private void ensureTargetsIsMutable() {
if (!((bitField0_ & 0x00000002) == 0x00000002)) {
targets_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>(targets_);
bitField0_ |= 0x00000002;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> targetsBuilder_;
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getTargetsList() {
if (targetsBuilder_ == null) {
return java.util.Collections.unmodifiableList(targets_);
} else {
return targetsBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public int getTargetsCount() {
if (targetsBuilder_ == null) {
return targets_.size();
} else {
return targetsBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index) {
if (targetsBuilder_ == null) {
return targets_.get(index);
} else {
return targetsBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder setTargets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (targetsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetsIsMutable();
targets_.set(index, value);
onChanged();
} else {
targetsBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder setTargets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.set(index, builderForValue.build());
onChanged();
} else {
targetsBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder addTargets(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (targetsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetsIsMutable();
targets_.add(value);
onChanged();
} else {
targetsBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder addTargets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (targetsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetsIsMutable();
targets_.add(index, value);
onChanged();
} else {
targetsBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder addTargets(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.add(builderForValue.build());
onChanged();
} else {
targetsBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder addTargets(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.add(index, builderForValue.build());
onChanged();
} else {
targetsBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder addAllTargets(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> values) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
super.addAll(values, targets_);
onChanged();
} else {
targetsBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder clearTargets() {
if (targetsBuilder_ == null) {
targets_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
} else {
targetsBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public Builder removeTargets(int index) {
if (targetsBuilder_ == null) {
ensureTargetsIsMutable();
targets_.remove(index);
onChanged();
} else {
targetsBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getTargetsBuilder(
int index) {
return getTargetsFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
int index) {
if (targetsBuilder_ == null) {
return targets_.get(index);
} else {
return targetsBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getTargetsOrBuilderList() {
if (targetsBuilder_ != null) {
return targetsBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(targets_);
}
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addTargetsBuilder() {
return getTargetsFieldBuilder().addBuilder(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addTargetsBuilder(
int index) {
return getTargetsFieldBuilder().addBuilder(
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder>
getTargetsBuilderList() {
return getTargetsFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getTargetsFieldBuilder() {
if (targetsBuilder_ == null) {
targetsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>(
targets_,
((bitField0_ & 0x00000002) == 0x00000002),
getParentForChildren(),
isClean());
targets_ = null;
}
return targetsBuilder_;
}
// repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> targetStorageTypes_ =
java.util.Collections.emptyList();
private void ensureTargetStorageTypesIsMutable() {
if (!((bitField0_ & 0x00000004) == 0x00000004)) {
targetStorageTypes_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(targetStorageTypes_);
bitField0_ |= 0x00000004;
}
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getTargetStorageTypesList() {
return java.util.Collections.unmodifiableList(targetStorageTypes_);
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
*/
public int getTargetStorageTypesCount() {
return targetStorageTypes_.size();
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getTargetStorageTypes(int index) {
return targetStorageTypes_.get(index);
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
*/
public Builder setTargetStorageTypes(
int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetStorageTypesIsMutable();
targetStorageTypes_.set(index, value);
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
*/
public Builder addTargetStorageTypes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetStorageTypesIsMutable();
targetStorageTypes_.add(value);
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
*/
public Builder addAllTargetStorageTypes(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> values) {
ensureTargetStorageTypesIsMutable();
super.addAll(values, targetStorageTypes_);
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;
*/
public Builder clearTargetStorageTypes() {
targetStorageTypes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
return this;
}
// repeated string targetStorageIds = 4;
private com.google.protobuf.LazyStringList targetStorageIds_ = com.google.protobuf.LazyStringArrayList.EMPTY;
private void ensureTargetStorageIdsIsMutable() {
if (!((bitField0_ & 0x00000008) == 0x00000008)) {
targetStorageIds_ = new com.google.protobuf.LazyStringArrayList(targetStorageIds_);
bitField0_ |= 0x00000008;
}
}
/**
* repeated string targetStorageIds = 4;
*/
public java.util.List<java.lang.String>
getTargetStorageIdsList() {
return java.util.Collections.unmodifiableList(targetStorageIds_);
}
/**
* repeated string targetStorageIds = 4;
*/
public int getTargetStorageIdsCount() {
return targetStorageIds_.size();
}
/**
* repeated string targetStorageIds = 4;
*/
public java.lang.String getTargetStorageIds(int index) {
return targetStorageIds_.get(index);
}
/**
* repeated string targetStorageIds = 4;
*/
public com.google.protobuf.ByteString
getTargetStorageIdsBytes(int index) {
return targetStorageIds_.getByteString(index);
}
/**
* repeated string targetStorageIds = 4;
*/
public Builder setTargetStorageIds(
int index, java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetStorageIdsIsMutable();
targetStorageIds_.set(index, value);
onChanged();
return this;
}
/**
* repeated string targetStorageIds = 4;
*/
public Builder addTargetStorageIds(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetStorageIdsIsMutable();
targetStorageIds_.add(value);
onChanged();
return this;
}
/**
* repeated string targetStorageIds = 4;
*/
public Builder addAllTargetStorageIds(
java.lang.Iterable<java.lang.String> values) {
ensureTargetStorageIdsIsMutable();
super.addAll(values, targetStorageIds_);
onChanged();
return this;
}
/**
* repeated string targetStorageIds = 4;
*/
public Builder clearTargetStorageIds() {
targetStorageIds_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000008);
onChanged();
return this;
}
/**
* repeated string targetStorageIds = 4;
*/
public Builder addTargetStorageIdsBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetStorageIdsIsMutable();
targetStorageIds_.add(value);
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpTransferBlockProto)
}
static {
defaultInstance = new OpTransferBlockProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.OpTransferBlockProto)
}
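// Usage sketch (illustrative only; not part of the generated file). Here
// clientHeader, targetDatanode, out and in stand for values the caller already
// has, and "storage-1" is a made-up storage id:
//
//   OpTransferBlockProto op = OpTransferBlockProto.newBuilder()
//       .setHeader(clientHeader)               // prebuilt ClientOperationHeaderProto
//       .addTargets(targetDatanode)            // prebuilt HdfsProtos.DatanodeInfoProto
//       .addTargetStorageTypes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK)
//       .addTargetStorageIds("storage-1")
//       .build();                              // throws if the required header is unset
//   op.writeDelimitedTo(out);                  // length-prefixed wire framing
//   OpTransferBlockProto echo = OpTransferBlockProto.parseDelimitedFrom(in);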
public interface OpReplaceBlockProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.BaseHeaderProto header = 1;
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
boolean hasHeader();
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader();
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder();
// required string delHint = 2;
/**
* required string delHint = 2;
*/
boolean hasDelHint();
/**
* required string delHint = 2;
*/
java.lang.String getDelHint();
/**
* required string delHint = 2;
*/
com.google.protobuf.ByteString
getDelHintBytes();
// required .hadoop.hdfs.DatanodeInfoProto source = 3;
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
boolean hasSource();
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource();
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder();
// optional .hadoop.hdfs.StorageTypeProto storageType = 4 [default = DISK];
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 4 [default = DISK];
*/
boolean hasStorageType();
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 4 [default = DISK];
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType();
// optional string storageId = 5;
/**
* optional string storageId = 5;
*/
boolean hasStorageId();
/**
* optional string storageId = 5;
*/
java.lang.String getStorageId();
/**
* optional string storageId = 5;
*/
com.google.protobuf.ByteString
getStorageIdBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.OpReplaceBlockProto}
*/
public static final class OpReplaceBlockProto extends
com.google.protobuf.GeneratedMessage
implements OpReplaceBlockProtoOrBuilder {
// Use OpReplaceBlockProto.newBuilder() to construct.
private OpReplaceBlockProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private OpReplaceBlockProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final OpReplaceBlockProto defaultInstance;
public static OpReplaceBlockProto getDefaultInstance() {
return defaultInstance;
}
public OpReplaceBlockProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private OpReplaceBlockProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = header_.toBuilder();
}
header_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(header_);
header_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
bitField0_ |= 0x00000002;
delHint_ = input.readBytes();
break;
}
case 26: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) == 0x00000004)) {
subBuilder = source_.toBuilder();
}
source_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(source_);
source_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
case 32: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(4, rawValue);
} else {
bitField0_ |= 0x00000008;
storageType_ = value;
}
break;
}
case 42: {
bitField0_ |= 0x00000010;
storageId_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReplaceBlockProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReplaceBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.Builder.class);
}
public static com.google.protobuf.Parser<OpReplaceBlockProto> PARSER =
new com.google.protobuf.AbstractParser<OpReplaceBlockProto>() {
public OpReplaceBlockProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new OpReplaceBlockProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<OpReplaceBlockProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.BaseHeaderProto header = 1;
public static final int HEADER_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
return header_;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
return header_;
}
// required string delHint = 2;
public static final int DELHINT_FIELD_NUMBER = 2;
private java.lang.Object delHint_;
/**
* required string delHint = 2;
*/
public boolean hasDelHint() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string delHint = 2;
*/
public java.lang.String getDelHint() {
java.lang.Object ref = delHint_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
delHint_ = s;
}
return s;
}
}
/**
* required string delHint = 2;
*/
public com.google.protobuf.ByteString
getDelHintBytes() {
java.lang.Object ref = delHint_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
delHint_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
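// Reader's note (not part of the generated source): delHint_ is declared as
// Object so it can hold either the raw ByteString read off the wire or a
// decoded String. getDelHint() decodes UTF-8 on first use and caches the
// String only when the bytes are valid UTF-8; getDelHintBytes() performs the
// reverse conversion and caches the ByteString. Either way the field settles
// on the representation the caller actually uses. storageId_ below follows
// the same pattern.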
// required .hadoop.hdfs.DatanodeInfoProto source = 3;
public static final int SOURCE_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto source_;
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public boolean hasSource() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource() {
return source_;
}
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder() {
return source_;
}
// optional .hadoop.hdfs.StorageTypeProto storageType = 4 [default = DISK];
public static final int STORAGETYPE_FIELD_NUMBER = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto storageType_;
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 4 [default = DISK];
*/
public boolean hasStorageType() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 4 [default = DISK];
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
return storageType_;
}
// optional string storageId = 5;
public static final int STORAGEID_FIELD_NUMBER = 5;
private java.lang.Object storageId_;
/**
* optional string storageId = 5;
*/
public boolean hasStorageId() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional string storageId = 5;
*/
public java.lang.String getStorageId() {
java.lang.Object ref = storageId_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
storageId_ = s;
}
return s;
}
}
/**
* optional string storageId = 5;
*/
public com.google.protobuf.ByteString
getStorageIdBytes() {
java.lang.Object ref = storageId_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
storageId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
delHint_ = "";
source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance();
storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
storageId_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasHeader()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasDelHint()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasSource()) {
memoizedIsInitialized = 0;
return false;
}
if (!getHeader().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
if (!getSource().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, header_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getDelHintBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeMessage(3, source_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeEnum(4, storageType_.getNumber());
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeBytes(5, getStorageIdBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, header_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getDelHintBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, source_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(4, storageType_.getNumber());
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(5, getStorageIdBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto) obj;
boolean result = true;
result = result && (hasHeader() == other.hasHeader());
if (hasHeader()) {
result = result && getHeader()
.equals(other.getHeader());
}
result = result && (hasDelHint() == other.hasDelHint());
if (hasDelHint()) {
result = result && getDelHint()
.equals(other.getDelHint());
}
result = result && (hasSource() == other.hasSource());
if (hasSource()) {
result = result && getSource()
.equals(other.getSource());
}
result = result && (hasStorageType() == other.hasStorageType());
if (hasStorageType()) {
result = result &&
(getStorageType() == other.getStorageType());
}
result = result && (hasStorageId() == other.hasStorageId());
if (hasStorageId()) {
result = result && getStorageId()
.equals(other.getStorageId());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasHeader()) {
hash = (37 * hash) + HEADER_FIELD_NUMBER;
hash = (53 * hash) + getHeader().hashCode();
}
if (hasDelHint()) {
hash = (37 * hash) + DELHINT_FIELD_NUMBER;
hash = (53 * hash) + getDelHint().hashCode();
}
if (hasSource()) {
hash = (37 * hash) + SOURCE_FIELD_NUMBER;
hash = (53 * hash) + getSource().hashCode();
}
if (hasStorageType()) {
hash = (37 * hash) + STORAGETYPE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getStorageType());
}
if (hasStorageId()) {
hash = (37 * hash) + STORAGEID_FIELD_NUMBER;
hash = (53 * hash) + getStorageId().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.OpReplaceBlockProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReplaceBlockProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReplaceBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getHeaderFieldBuilder();
getSourceFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (headerBuilder_ == null) {
header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
delHint_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
if (sourceBuilder_ == null) {
source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance();
} else {
sourceBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
bitField0_ = (bitField0_ & ~0x00000008);
storageId_ = "";
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReplaceBlockProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (headerBuilder_ == null) {
result.header_ = header_;
} else {
result.header_ = headerBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.delHint_ = delHint_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
if (sourceBuilder_ == null) {
result.source_ = source_;
} else {
result.source_ = sourceBuilder_.build();
}
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.storageType_ = storageType_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.storageId_ = storageId_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.getDefaultInstance()) return this;
if (other.hasHeader()) {
mergeHeader(other.getHeader());
}
if (other.hasDelHint()) {
bitField0_ |= 0x00000002;
delHint_ = other.delHint_;
onChanged();
}
if (other.hasSource()) {
mergeSource(other.getSource());
}
if (other.hasStorageType()) {
setStorageType(other.getStorageType());
}
if (other.hasStorageId()) {
bitField0_ |= 0x00000010;
storageId_ = other.storageId_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasHeader()) {
return false;
}
if (!hasDelHint()) {
return false;
}
if (!hasSource()) {
return false;
}
if (!getHeader().isInitialized()) {
return false;
}
if (!getSource().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
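/*
 * Note (annotation, not generated output): assigning e.getUnfinishedMessage()
 * to parsedMessage before rethrowing means the finally block still merges
 * whatever fields were successfully read before the parse failure, so a
 * partially-read OpReplaceBlockProto is not silently discarded.
 */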
private int bitField0_;
// required .hadoop.hdfs.BaseHeaderProto header = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> headerBuilder_;
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
if (headerBuilder_ == null) {
return header_;
} else {
return headerBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
if (headerBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
header_ = value;
onChanged();
} else {
headerBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder setHeader(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder builderForValue) {
if (headerBuilder_ == null) {
header_ = builderForValue.build();
onChanged();
} else {
headerBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
if (headerBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) {
header_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial();
} else {
header_ = value;
}
onChanged();
} else {
headerBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder clearHeader() {
if (headerBuilder_ == null) {
header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
onChanged();
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder getHeaderBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getHeaderFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
if (headerBuilder_ != null) {
return headerBuilder_.getMessageOrBuilder();
} else {
return header_;
}
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>
getHeaderFieldBuilder() {
if (headerBuilder_ == null) {
headerBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>(
header_,
getParentForChildren(),
isClean());
header_ = null;
}
return headerBuilder_;
}
// required string delHint = 2;
private java.lang.Object delHint_ = "";
/**
* required string delHint = 2;
*/
public boolean hasDelHint() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required string delHint = 2;
*/
public java.lang.String getDelHint() {
java.lang.Object ref = delHint_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
delHint_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string delHint = 2;
*/
public com.google.protobuf.ByteString
getDelHintBytes() {
java.lang.Object ref = delHint_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
delHint_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string delHint = 2;
*/
public Builder setDelHint(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
delHint_ = value;
onChanged();
return this;
}
/**
* required string delHint = 2;
*/
public Builder clearDelHint() {
bitField0_ = (bitField0_ & ~0x00000002);
delHint_ = getDefaultInstance().getDelHint();
onChanged();
return this;
}
/**
* required string delHint = 2;
*/
public Builder setDelHintBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
delHint_ = value;
onChanged();
return this;
}
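/*
 * Note (annotation, not generated output): string fields such as delHint and
 * storageId are held as java.lang.Object so the builder can cache either the
 * decoded String or the raw UTF-8 ByteString read off the wire. getDelHint()
 * decodes and memoizes the String; getDelHintBytes() encodes and memoizes the
 * ByteString. A hypothetical round trip:
 *
 *   builder.setDelHintBytes(com.google.protobuf.ByteString.copyFromUtf8("dn-1"));
 *   String hint = builder.getDelHint(); // decodes once, then returns the cached String
 */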
// required .hadoop.hdfs.DatanodeInfoProto source = 3;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> sourceBuilder_;
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public boolean hasSource() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource() {
if (sourceBuilder_ == null) {
return source_;
} else {
return sourceBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public Builder setSource(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (sourceBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
source_ = value;
onChanged();
} else {
sourceBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public Builder setSource(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
if (sourceBuilder_ == null) {
source_ = builderForValue.build();
onChanged();
} else {
sourceBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
return this;
}
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public Builder mergeSource(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
if (sourceBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004) &&
source_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()) {
source_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder(source_).mergeFrom(value).buildPartial();
} else {
source_ = value;
}
onChanged();
} else {
sourceBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public Builder clearSource() {
if (sourceBuilder_ == null) {
source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance();
onChanged();
} else {
sourceBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getSourceBuilder() {
bitField0_ |= 0x00000004;
onChanged();
return getSourceFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder() {
if (sourceBuilder_ != null) {
return sourceBuilder_.getMessageOrBuilder();
} else {
return source_;
}
}
/**
* required .hadoop.hdfs.DatanodeInfoProto source = 3;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
getSourceFieldBuilder() {
if (sourceBuilder_ == null) {
sourceBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>(
source_,
getParentForChildren(),
isClean());
source_ = null;
}
return sourceBuilder_;
}
// optional .hadoop.hdfs.StorageTypeProto storageType = 4 [default = DISK];
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 4 [default = DISK];
*/
public boolean hasStorageType() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 4 [default = DISK];
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
return storageType_;
}
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 4 [default = DISK];
*/
public Builder setStorageType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
storageType_ = value;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.StorageTypeProto storageType = 4 [default = DISK];
*/
public Builder clearStorageType() {
bitField0_ = (bitField0_ & ~0x00000008);
storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
onChanged();
return this;
}
// optional string storageId = 5;
private java.lang.Object storageId_ = "";
/**
* optional string storageId = 5;
*/
public boolean hasStorageId() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional string storageId = 5;
*/
public java.lang.String getStorageId() {
java.lang.Object ref = storageId_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
storageId_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string storageId = 5;
*/
public com.google.protobuf.ByteString
getStorageIdBytes() {
java.lang.Object ref = storageId_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
storageId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* optional string storageId = 5;
*/
public Builder setStorageId(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000010;
storageId_ = value;
onChanged();
return this;
}
/**
* optional string storageId = 5;
*/
public Builder clearStorageId() {
bitField0_ = (bitField0_ & ~0x00000010);
storageId_ = getDefaultInstance().getStorageId();
onChanged();
return this;
}
/**
* optional string storageId = 5;
*/
public Builder setStorageIdBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000010;
storageId_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpReplaceBlockProto)
}
static {
defaultInstance = new OpReplaceBlockProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.OpReplaceBlockProto)
}
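/*
 * Usage sketch (annotation, not generated output): building and serializing an
 * OpReplaceBlockProto. "baseHeader" and "sourceNode" are hypothetical,
 * fully-initialized messages, not names defined in this file.
 *
 *   OpReplaceBlockProto op = OpReplaceBlockProto.newBuilder()
 *       .setHeader(baseHeader)                          // required
 *       .setDelHint("storage-uuid-of-replaced-replica") // required
 *       .setSource(sourceNode)                          // required
 *       .setStorageType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.SSD) // optional; defaults to DISK
 *       .build();                                       // throws if a required field is unset
 *   byte[] wire = op.toByteArray();
 */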
public interface OpCopyBlockProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.BaseHeaderProto header = 1;
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
boolean hasHeader();
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader();
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.OpCopyBlockProto}
*/
public static final class OpCopyBlockProto extends
com.google.protobuf.GeneratedMessage
implements OpCopyBlockProtoOrBuilder {
// Use OpCopyBlockProto.newBuilder() to construct.
private OpCopyBlockProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private OpCopyBlockProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final OpCopyBlockProto defaultInstance;
public static OpCopyBlockProto getDefaultInstance() {
return defaultInstance;
}
public OpCopyBlockProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private OpCopyBlockProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = header_.toBuilder();
}
header_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(header_);
header_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCopyBlockProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCopyBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.Builder.class);
}
public static com.google.protobuf.Parser<OpCopyBlockProto> PARSER =
new com.google.protobuf.AbstractParser<OpCopyBlockProto>() {
public OpCopyBlockProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new OpCopyBlockProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<OpCopyBlockProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.BaseHeaderProto header = 1;
public static final int HEADER_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
return header_;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
return header_;
}
private void initFields() {
header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasHeader()) {
memoizedIsInitialized = 0;
return false;
}
if (!getHeader().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, header_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, header_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto) obj;
boolean result = true;
result = result && (hasHeader() == other.hasHeader());
if (hasHeader()) {
result = result && getHeader()
.equals(other.getHeader());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasHeader()) {
hash = (37 * hash) + HEADER_FIELD_NUMBER;
hash = (53 * hash) + getHeader().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.OpCopyBlockProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCopyBlockProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCopyBlockProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getHeaderFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (headerBuilder_ == null) {
header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCopyBlockProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (headerBuilder_ == null) {
result.header_ = header_;
} else {
result.header_ = headerBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.getDefaultInstance()) return this;
if (other.hasHeader()) {
mergeHeader(other.getHeader());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasHeader()) {
return false;
}
if (!getHeader().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.BaseHeaderProto header = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> headerBuilder_;
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
if (headerBuilder_ == null) {
return header_;
} else {
return headerBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
if (headerBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
header_ = value;
onChanged();
} else {
headerBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder setHeader(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder builderForValue) {
if (headerBuilder_ == null) {
header_ = builderForValue.build();
onChanged();
} else {
headerBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
if (headerBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) {
header_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial();
} else {
header_ = value;
}
onChanged();
} else {
headerBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder clearHeader() {
if (headerBuilder_ == null) {
header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
onChanged();
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder getHeaderBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getHeaderFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
if (headerBuilder_ != null) {
return headerBuilder_.getMessageOrBuilder();
} else {
return header_;
}
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>
getHeaderFieldBuilder() {
if (headerBuilder_ == null) {
headerBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>(
header_,
getParentForChildren(),
isClean());
header_ = null;
}
return headerBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpCopyBlockProto)
}
static {
defaultInstance = new OpCopyBlockProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.OpCopyBlockProto)
}
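/*
 * Usage sketch (annotation, not generated output): round-tripping an
 * OpCopyBlockProto through its parser. "header" is a hypothetical,
 * fully-initialized BaseHeaderProto.
 *
 *   OpCopyBlockProto op = OpCopyBlockProto.newBuilder()
 *       .setHeader(header)   // the message's only (required) field
 *       .build();
 *   com.google.protobuf.ByteString data = op.toByteString();
 *   OpCopyBlockProto parsed = OpCopyBlockProto.parseFrom(data);
 *   // equals() and hashCode() above make the round trip verifiable:
 *   assert parsed.equals(op) && parsed.hashCode() == op.hashCode();
 */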
public interface OpBlockChecksumProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.BaseHeaderProto header = 1;
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
boolean hasHeader();
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader();
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder();
// optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
boolean hasBlockChecksumOptions();
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions();
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.OpBlockChecksumProto}
*/
public static final class OpBlockChecksumProto extends
com.google.protobuf.GeneratedMessage
implements OpBlockChecksumProtoOrBuilder {
// Use OpBlockChecksumProto.newBuilder() to construct.
private OpBlockChecksumProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private OpBlockChecksumProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final OpBlockChecksumProto defaultInstance;
public static OpBlockChecksumProto getDefaultInstance() {
return defaultInstance;
}
public OpBlockChecksumProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private OpBlockChecksumProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = header_.toBuilder();
}
header_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(header_);
header_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
subBuilder = blockChecksumOptions_.toBuilder();
}
blockChecksumOptions_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(blockChecksumOptions_);
blockChecksumOptions_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.Builder.class);
}
public static com.google.protobuf.Parser<OpBlockChecksumProto> PARSER =
new com.google.protobuf.AbstractParser<OpBlockChecksumProto>() {
public OpBlockChecksumProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new OpBlockChecksumProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<OpBlockChecksumProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.BaseHeaderProto header = 1;
public static final int HEADER_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
return header_;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
return header_;
}
// optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
public static final int BLOCKCHECKSUMOPTIONS_FIELD_NUMBER = 2;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto blockChecksumOptions_;
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
public boolean hasBlockChecksumOptions() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions() {
return blockChecksumOptions_;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder() {
return blockChecksumOptions_;
}
private void initFields() {
header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
blockChecksumOptions_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasHeader()) {
memoizedIsInitialized = 0;
return false;
}
if (!getHeader().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, header_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeMessage(2, blockChecksumOptions_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, header_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, blockChecksumOptions_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto) obj;
boolean result = true;
result = result && (hasHeader() == other.hasHeader());
if (hasHeader()) {
result = result && getHeader()
.equals(other.getHeader());
}
result = result && (hasBlockChecksumOptions() == other.hasBlockChecksumOptions());
if (hasBlockChecksumOptions()) {
result = result && getBlockChecksumOptions()
.equals(other.getBlockChecksumOptions());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasHeader()) {
hash = (37 * hash) + HEADER_FIELD_NUMBER;
hash = (53 * hash) + getHeader().hashCode();
}
if (hasBlockChecksumOptions()) {
hash = (37 * hash) + BLOCKCHECKSUMOPTIONS_FIELD_NUMBER;
hash = (53 * hash) + getBlockChecksumOptions().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.OpBlockChecksumProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getHeaderFieldBuilder();
getBlockChecksumOptionsFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (headerBuilder_ == null) {
header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
if (blockChecksumOptionsBuilder_ == null) {
blockChecksumOptions_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance();
} else {
blockChecksumOptionsBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (headerBuilder_ == null) {
result.header_ = header_;
} else {
result.header_ = headerBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
if (blockChecksumOptionsBuilder_ == null) {
result.blockChecksumOptions_ = blockChecksumOptions_;
} else {
result.blockChecksumOptions_ = blockChecksumOptionsBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.getDefaultInstance()) return this;
if (other.hasHeader()) {
mergeHeader(other.getHeader());
}
if (other.hasBlockChecksumOptions()) {
mergeBlockChecksumOptions(other.getBlockChecksumOptions());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasHeader()) {
return false;
}
if (!getHeader().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.BaseHeaderProto header = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> headerBuilder_;
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
if (headerBuilder_ == null) {
return header_;
} else {
return headerBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
if (headerBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
header_ = value;
onChanged();
} else {
headerBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder setHeader(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder builderForValue) {
if (headerBuilder_ == null) {
header_ = builderForValue.build();
onChanged();
} else {
headerBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
if (headerBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) {
header_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial();
} else {
header_ = value;
}
onChanged();
} else {
headerBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder clearHeader() {
if (headerBuilder_ == null) {
header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
onChanged();
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder getHeaderBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getHeaderFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
if (headerBuilder_ != null) {
return headerBuilder_.getMessageOrBuilder();
} else {
return header_;
}
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>
getHeaderFieldBuilder() {
if (headerBuilder_ == null) {
headerBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>(
header_,
getParentForChildren(),
isClean());
header_ = null;
}
return headerBuilder_;
}
// optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto blockChecksumOptions_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder> blockChecksumOptionsBuilder_;
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
public boolean hasBlockChecksumOptions() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions() {
if (blockChecksumOptionsBuilder_ == null) {
return blockChecksumOptions_;
} else {
return blockChecksumOptionsBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
public Builder setBlockChecksumOptions(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto value) {
if (blockChecksumOptionsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
blockChecksumOptions_ = value;
onChanged();
} else {
blockChecksumOptionsBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
public Builder setBlockChecksumOptions(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder builderForValue) {
if (blockChecksumOptionsBuilder_ == null) {
blockChecksumOptions_ = builderForValue.build();
onChanged();
} else {
blockChecksumOptionsBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
public Builder mergeBlockChecksumOptions(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto value) {
if (blockChecksumOptionsBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002) &&
blockChecksumOptions_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance()) {
blockChecksumOptions_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.newBuilder(blockChecksumOptions_).mergeFrom(value).buildPartial();
} else {
blockChecksumOptions_ = value;
}
onChanged();
} else {
blockChecksumOptionsBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
public Builder clearBlockChecksumOptions() {
if (blockChecksumOptionsBuilder_ == null) {
blockChecksumOptions_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance();
onChanged();
} else {
blockChecksumOptionsBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder getBlockChecksumOptionsBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getBlockChecksumOptionsFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder() {
if (blockChecksumOptionsBuilder_ != null) {
return blockChecksumOptionsBuilder_.getMessageOrBuilder();
} else {
return blockChecksumOptions_;
}
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder>
getBlockChecksumOptionsFieldBuilder() {
if (blockChecksumOptionsBuilder_ == null) {
blockChecksumOptionsBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder>(
blockChecksumOptions_,
getParentForChildren(),
isClean());
blockChecksumOptions_ = null;
}
return blockChecksumOptionsBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpBlockChecksumProto)
}
static {
defaultInstance = new OpBlockChecksumProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.OpBlockChecksumProto)
}
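// ---------------------------------------------------------------------------
// OpBlockGroupChecksumProto: checksum request for an erasure-coded block
// group. Unlike OpBlockChecksumProto above, it also carries the datanode set,
// one block token per internal block, the EC policy, and the block indices to
// include. A minimal construction sketch (not generated code; the `header`,
// `datanodes` and `ecPolicy` values are assumed to be built elsewhere):
//
//   OpBlockGroupChecksumProto op = OpBlockGroupChecksumProto.newBuilder()
//       .setHeader(header)                       // required BaseHeaderProto
//       .setDatanodes(datanodes)                 // required DatanodeInfosProto
//       .setEcPolicy(ecPolicy)                   // required EC policy
//       .addAllBlockIndices(java.util.Arrays.asList(0, 1, 2))
//       .setRequestedNumBytes(64L * 1024 * 1024) // required uint64
//       .build();                                // throws if a required field is unset
// ---------------------------------------------------------------------------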
public interface OpBlockGroupChecksumProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.BaseHeaderProto header = 1;
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
boolean hasHeader();
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader();
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder();
// required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
boolean hasDatanodes();
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getDatanodes();
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder getDatanodesOrBuilder();
// repeated .hadoop.common.TokenProto blockTokens = 3;
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*
*
* each internal block has a block token
*
*/
java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto>
getBlockTokensList();
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*
*
* each internal block has a block token
*
*/
org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockTokens(int index);
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*
*
* each internal block has a block token
*
*/
int getBlockTokensCount();
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*
*
* each internal block has a block token
*
*/
java.util.List<? extends org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>
getBlockTokensOrBuilderList();
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*
*
* each internal block has a block token
*
*/
org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokensOrBuilder(
int index);
// required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
boolean hasEcPolicy();
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy();
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder();
// repeated uint32 blockIndices = 5;
/**
* repeated uint32 blockIndices = 5;
*/
java.util.List<java.lang.Integer> getBlockIndicesList();
/**
* repeated uint32 blockIndices = 5;
*/
int getBlockIndicesCount();
/**
* repeated uint32 blockIndices = 5;
*/
int getBlockIndices(int index);
// required uint64 requestedNumBytes = 6;
/**
* required uint64 requestedNumBytes = 6;
*/
boolean hasRequestedNumBytes();
/**
* required uint64 requestedNumBytes = 6;
*/
long getRequestedNumBytes();
// optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
boolean hasBlockChecksumOptions();
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions();
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder();
}
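// The *OrBuilder interface above is a read-only view implemented by both the
// immutable message and its Builder, so callers can accept either without
// forcing a build(). A hedged sketch (the helper name is hypothetical):
//
//   static long indicesSum(OpBlockGroupChecksumProtoOrBuilder m) {
//     long sum = 0;
//     for (int i = 0; i < m.getBlockIndicesCount(); i++) {
//       sum += m.getBlockIndices(i);   // works on message or builder alike
//     }
//     return sum;
//   }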
/**
* Protobuf type {@code hadoop.hdfs.OpBlockGroupChecksumProto}
*/
public static final class OpBlockGroupChecksumProto extends
com.google.protobuf.GeneratedMessage
implements OpBlockGroupChecksumProtoOrBuilder {
// Use OpBlockGroupChecksumProto.newBuilder() to construct.
private OpBlockGroupChecksumProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private OpBlockGroupChecksumProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final OpBlockGroupChecksumProto defaultInstance;
public static OpBlockGroupChecksumProto getDefaultInstance() {
return defaultInstance;
}
public OpBlockGroupChecksumProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private OpBlockGroupChecksumProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = header_.toBuilder();
}
header_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(header_);
header_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
subBuilder = datanodes_.toBuilder();
}
datanodes_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(datanodes_);
datanodes_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
case 26: {
if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
blockTokens_ = new java.util.ArrayList<org.apache.hadoop.security.proto.SecurityProtos.TokenProto>();
mutable_bitField0_ |= 0x00000004;
}
blockTokens_.add(input.readMessage(org.apache.hadoop.security.proto.SecurityProtos.TokenProto.PARSER, extensionRegistry));
break;
}
case 34: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) == 0x00000004)) {
subBuilder = ecPolicy_.toBuilder();
}
ecPolicy_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(ecPolicy_);
ecPolicy_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
case 40: {
if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
blockIndices_ = new java.util.ArrayList<java.lang.Integer>();
mutable_bitField0_ |= 0x00000010;
}
blockIndices_.add(input.readUInt32());
break;
}
case 42: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
if (!((mutable_bitField0_ & 0x00000010) == 0x00000010) && input.getBytesUntilLimit() > 0) {
blockIndices_ = new java.util.ArrayList<java.lang.Integer>();
mutable_bitField0_ |= 0x00000010;
}
while (input.getBytesUntilLimit() > 0) {
blockIndices_.add(input.readUInt32());
}
input.popLimit(limit);
break;
}
case 48: {
bitField0_ |= 0x00000008;
requestedNumBytes_ = input.readUInt64();
break;
}
case 58: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000010) == 0x00000010)) {
subBuilder = blockChecksumOptions_.toBuilder();
}
blockChecksumOptions_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(blockChecksumOptions_);
blockChecksumOptions_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000010;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
blockTokens_ = java.util.Collections.unmodifiableList(blockTokens_);
}
if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
blockIndices_ = java.util.Collections.unmodifiableList(blockIndices_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
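// Wire-format note for the switch above: a protobuf tag is
// (fieldNumber << 3) | wireType. Field 1 with wire type 2 (length-delimited)
// gives case 10; field 5 (blockIndices) appears both unpacked
// (5 << 3 | 0 = 40, one varint per element) and packed
// (5 << 3 | 2 = 42, a single length-delimited run of varints), so the parser
// accepts either encoding of the same repeated uint32 field.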
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto.Builder.class);
}
public static com.google.protobuf.Parser<OpBlockGroupChecksumProto> PARSER =
new com.google.protobuf.AbstractParser<OpBlockGroupChecksumProto>() {
public OpBlockGroupChecksumProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new OpBlockGroupChecksumProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<OpBlockGroupChecksumProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.BaseHeaderProto header = 1;
public static final int HEADER_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
return header_;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
return header_;
}
// required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
public static final int DATANODES_FIELD_NUMBER = 2;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto datanodes_;
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
public boolean hasDatanodes() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getDatanodes() {
return datanodes_;
}
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder getDatanodesOrBuilder() {
return datanodes_;
}
// repeated .hadoop.common.TokenProto blockTokens = 3;
public static final int BLOCKTOKENS_FIELD_NUMBER = 3;
private java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> blockTokens_;
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*
*
* each internal block has a block token
*
*/
public java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> getBlockTokensList() {
return blockTokens_;
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*
*
* each internal block has a block token
*
*/
public java.util.List<? extends org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>
getBlockTokensOrBuilderList() {
return blockTokens_;
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*
*
* each internal block has a block token
*
*/
public int getBlockTokensCount() {
return blockTokens_.size();
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*
*
* each internal block has a block token
*
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockTokens(int index) {
return blockTokens_.get(index);
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*
*
* each internal block has a block token
*
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokensOrBuilder(
int index) {
return blockTokens_.get(index);
}
// required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
public static final int ECPOLICY_FIELD_NUMBER = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto ecPolicy_;
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public boolean hasEcPolicy() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy() {
return ecPolicy_;
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder() {
return ecPolicy_;
}
// repeated uint32 blockIndices = 5;
public static final int BLOCKINDICES_FIELD_NUMBER = 5;
private java.util.List<java.lang.Integer> blockIndices_;
/**
* repeated uint32 blockIndices = 5;
*/
public java.util.List<java.lang.Integer>
getBlockIndicesList() {
return blockIndices_;
}
/**
* repeated uint32 blockIndices = 5;
*/
public int getBlockIndicesCount() {
return blockIndices_.size();
}
/**
* repeated uint32 blockIndices = 5;
*/
public int getBlockIndices(int index) {
return blockIndices_.get(index);
}
// required uint64 requestedNumBytes = 6;
public static final int REQUESTEDNUMBYTES_FIELD_NUMBER = 6;
private long requestedNumBytes_;
/**
* required uint64 requestedNumBytes = 6;
*/
public boolean hasRequestedNumBytes() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required uint64 requestedNumBytes = 6;
*/
public long getRequestedNumBytes() {
return requestedNumBytes_;
}
// optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
public static final int BLOCKCHECKSUMOPTIONS_FIELD_NUMBER = 7;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto blockChecksumOptions_;
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
public boolean hasBlockChecksumOptions() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions() {
return blockChecksumOptions_;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder() {
return blockChecksumOptions_;
}
private void initFields() {
header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
datanodes_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance();
blockTokens_ = java.util.Collections.emptyList();
ecPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance();
blockIndices_ = java.util.Collections.emptyList();
requestedNumBytes_ = 0L;
blockChecksumOptions_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasHeader()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasDatanodes()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasEcPolicy()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasRequestedNumBytes()) {
memoizedIsInitialized = 0;
return false;
}
if (!getHeader().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
if (!getDatanodes().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getBlockTokensCount(); i++) {
if (!getBlockTokens(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (!getEcPolicy().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
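// isInitialized() above enforces proto2 `required` semantics: header,
// datanodes, ecPolicy and requestedNumBytes must be present, and every nested
// message must itself be initialized. The result is memoized in a byte
// (-1 = unknown, 0 = false, 1 = true) so repeated checks are cheap.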
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, header_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeMessage(2, datanodes_);
}
for (int i = 0; i < blockTokens_.size(); i++) {
output.writeMessage(3, blockTokens_.get(i));
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeMessage(4, ecPolicy_);
}
for (int i = 0; i < blockIndices_.size(); i++) {
output.writeUInt32(5, blockIndices_.get(i));
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt64(6, requestedNumBytes_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeMessage(7, blockChecksumOptions_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, header_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, datanodes_);
}
for (int i = 0; i < blockTokens_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, blockTokens_.get(i));
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(4, ecPolicy_);
}
{
int dataSize = 0;
for (int i = 0; i < blockIndices_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeUInt32SizeNoTag(blockIndices_.get(i));
}
size += dataSize;
size += 1 * getBlockIndicesList().size();
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(6, requestedNumBytes_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(7, blockChecksumOptions_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
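// Size accounting for the unpacked repeated uint32 above: each element pays
// its varint payload plus exactly one tag byte, because the tag
// (5 << 3 | 0 = 40) fits in a single byte -- hence the
// `1 * getBlockIndicesList().size()` term. For example, indices {0, 1, 300}
// cost (1+1) + (1+1) + (1+2) = 7 bytes in total.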
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto) obj;
boolean result = true;
result = result && (hasHeader() == other.hasHeader());
if (hasHeader()) {
result = result && getHeader()
.equals(other.getHeader());
}
result = result && (hasDatanodes() == other.hasDatanodes());
if (hasDatanodes()) {
result = result && getDatanodes()
.equals(other.getDatanodes());
}
result = result && getBlockTokensList()
.equals(other.getBlockTokensList());
result = result && (hasEcPolicy() == other.hasEcPolicy());
if (hasEcPolicy()) {
result = result && getEcPolicy()
.equals(other.getEcPolicy());
}
result = result && getBlockIndicesList()
.equals(other.getBlockIndicesList());
result = result && (hasRequestedNumBytes() == other.hasRequestedNumBytes());
if (hasRequestedNumBytes()) {
result = result && (getRequestedNumBytes()
== other.getRequestedNumBytes());
}
result = result && (hasBlockChecksumOptions() == other.hasBlockChecksumOptions());
if (hasBlockChecksumOptions()) {
result = result && getBlockChecksumOptions()
.equals(other.getBlockChecksumOptions());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasHeader()) {
hash = (37 * hash) + HEADER_FIELD_NUMBER;
hash = (53 * hash) + getHeader().hashCode();
}
if (hasDatanodes()) {
hash = (37 * hash) + DATANODES_FIELD_NUMBER;
hash = (53 * hash) + getDatanodes().hashCode();
}
if (getBlockTokensCount() > 0) {
hash = (37 * hash) + BLOCKTOKENS_FIELD_NUMBER;
hash = (53 * hash) + getBlockTokensList().hashCode();
}
if (hasEcPolicy()) {
hash = (37 * hash) + ECPOLICY_FIELD_NUMBER;
hash = (53 * hash) + getEcPolicy().hashCode();
}
if (getBlockIndicesCount() > 0) {
hash = (37 * hash) + BLOCKINDICES_FIELD_NUMBER;
hash = (53 * hash) + getBlockIndicesList().hashCode();
}
if (hasRequestedNumBytes()) {
hash = (37 * hash) + REQUESTEDNUMBYTES_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getRequestedNumBytes());
}
if (hasBlockChecksumOptions()) {
hash = (37 * hash) + BLOCKCHECKSUMOPTIONS_FIELD_NUMBER;
hash = (53 * hash) + getBlockChecksumOptions().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
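// A hedged round-trip sketch using the static parse helpers above (the
// `out`/`in` stream pair is assumed; any java.io streams work):
//
//   op.writeDelimitedTo(out);                        // length-prefixed frame
//   OpBlockGroupChecksumProto back =
//       OpBlockGroupChecksumProto.parseDelimitedFrom(in);
//   assert back.equals(op);                          // value equality, see equals()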
/**
* Protobuf type {@code hadoop.hdfs.OpBlockGroupChecksumProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getHeaderFieldBuilder();
getDatanodesFieldBuilder();
getBlockTokensFieldBuilder();
getEcPolicyFieldBuilder();
getBlockChecksumOptionsFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (headerBuilder_ == null) {
header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
if (datanodesBuilder_ == null) {
datanodes_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance();
} else {
datanodesBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
if (blockTokensBuilder_ == null) {
blockTokens_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
} else {
blockTokensBuilder_.clear();
}
if (ecPolicyBuilder_ == null) {
ecPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance();
} else {
ecPolicyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
blockIndices_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000010);
requestedNumBytes_ = 0L;
bitField0_ = (bitField0_ & ~0x00000020);
if (blockChecksumOptionsBuilder_ == null) {
blockChecksumOptions_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance();
} else {
blockChecksumOptionsBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000040);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (headerBuilder_ == null) {
result.header_ = header_;
} else {
result.header_ = headerBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
if (datanodesBuilder_ == null) {
result.datanodes_ = datanodes_;
} else {
result.datanodes_ = datanodesBuilder_.build();
}
if (blockTokensBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004)) {
blockTokens_ = java.util.Collections.unmodifiableList(blockTokens_);
bitField0_ = (bitField0_ & ~0x00000004);
}
result.blockTokens_ = blockTokens_;
} else {
result.blockTokens_ = blockTokensBuilder_.build();
}
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000004;
}
if (ecPolicyBuilder_ == null) {
result.ecPolicy_ = ecPolicy_;
} else {
result.ecPolicy_ = ecPolicyBuilder_.build();
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
blockIndices_ = java.util.Collections.unmodifiableList(blockIndices_);
bitField0_ = (bitField0_ & ~0x00000010);
}
result.blockIndices_ = blockIndices_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000008;
}
result.requestedNumBytes_ = requestedNumBytes_;
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000010;
}
if (blockChecksumOptionsBuilder_ == null) {
result.blockChecksumOptions_ = blockChecksumOptions_;
} else {
result.blockChecksumOptions_ = blockChecksumOptionsBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
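// Note on buildPartial() above: the Builder tracks seven fields in its
// bitField0_ (the repeated fields occupy bits only while mutable), while the
// built message keeps just five presence bits, so the bits are remapped on
// copy -- e.g. the builder's requestedNumBytes bit 0x20 becomes 0x08 in the
// message.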
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto.getDefaultInstance()) return this;
if (other.hasHeader()) {
mergeHeader(other.getHeader());
}
if (other.hasDatanodes()) {
mergeDatanodes(other.getDatanodes());
}
if (blockTokensBuilder_ == null) {
if (!other.blockTokens_.isEmpty()) {
if (blockTokens_.isEmpty()) {
blockTokens_ = other.blockTokens_;
bitField0_ = (bitField0_ & ~0x00000004);
} else {
ensureBlockTokensIsMutable();
blockTokens_.addAll(other.blockTokens_);
}
onChanged();
}
} else {
if (!other.blockTokens_.isEmpty()) {
if (blockTokensBuilder_.isEmpty()) {
blockTokensBuilder_.dispose();
blockTokensBuilder_ = null;
blockTokens_ = other.blockTokens_;
bitField0_ = (bitField0_ & ~0x00000004);
blockTokensBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getBlockTokensFieldBuilder() : null;
} else {
blockTokensBuilder_.addAllMessages(other.blockTokens_);
}
}
}
if (other.hasEcPolicy()) {
mergeEcPolicy(other.getEcPolicy());
}
if (!other.blockIndices_.isEmpty()) {
if (blockIndices_.isEmpty()) {
blockIndices_ = other.blockIndices_;
bitField0_ = (bitField0_ & ~0x00000010);
} else {
ensureBlockIndicesIsMutable();
blockIndices_.addAll(other.blockIndices_);
}
onChanged();
}
if (other.hasRequestedNumBytes()) {
setRequestedNumBytes(other.getRequestedNumBytes());
}
if (other.hasBlockChecksumOptions()) {
mergeBlockChecksumOptions(other.getBlockChecksumOptions());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasHeader()) {
return false;
}
if (!hasDatanodes()) {
return false;
}
if (!hasEcPolicy()) {
return false;
}
if (!hasRequestedNumBytes()) {
return false;
}
if (!getHeader().isInitialized()) {
return false;
}
if (!getDatanodes().isInitialized()) {
return false;
}
for (int i = 0; i < getBlockTokensCount(); i++) {
if (!getBlockTokens(i).isInitialized()) {
return false;
}
}
if (!getEcPolicy().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.BaseHeaderProto header = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> headerBuilder_;
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
if (headerBuilder_ == null) {
return header_;
} else {
return headerBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
if (headerBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
header_ = value;
onChanged();
} else {
headerBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder setHeader(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder builderForValue) {
if (headerBuilder_ == null) {
header_ = builderForValue.build();
onChanged();
} else {
headerBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
if (headerBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) {
header_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial();
} else {
header_ = value;
}
onChanged();
} else {
headerBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder clearHeader() {
if (headerBuilder_ == null) {
header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
onChanged();
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder getHeaderBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getHeaderFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
if (headerBuilder_ != null) {
return headerBuilder_.getMessageOrBuilder();
} else {
return header_;
}
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>
getHeaderFieldBuilder() {
if (headerBuilder_ == null) {
headerBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>(
header_,
getParentForChildren(),
isClean());
header_ = null;
}
return headerBuilder_;
}
// required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto datanodes_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder> datanodesBuilder_;
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
public boolean hasDatanodes() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getDatanodes() {
if (datanodesBuilder_ == null) {
return datanodes_;
} else {
return datanodesBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
public Builder setDatanodes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto value) {
if (datanodesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
datanodes_ = value;
onChanged();
} else {
datanodesBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
public Builder setDatanodes(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder builderForValue) {
if (datanodesBuilder_ == null) {
datanodes_ = builderForValue.build();
onChanged();
} else {
datanodesBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
public Builder mergeDatanodes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto value) {
if (datanodesBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002) &&
datanodes_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance()) {
datanodes_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.newBuilder(datanodes_).mergeFrom(value).buildPartial();
} else {
datanodes_ = value;
}
onChanged();
} else {
datanodesBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
public Builder clearDatanodes() {
if (datanodesBuilder_ == null) {
datanodes_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance();
onChanged();
} else {
datanodesBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder getDatanodesBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getDatanodesFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder getDatanodesOrBuilder() {
if (datanodesBuilder_ != null) {
return datanodesBuilder_.getMessageOrBuilder();
} else {
return datanodes_;
}
}
/**
* required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder>
getDatanodesFieldBuilder() {
if (datanodesBuilder_ == null) {
datanodesBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder>(
datanodes_,
getParentForChildren(),
isClean());
datanodes_ = null;
}
return datanodesBuilder_;
}
// repeated .hadoop.common.TokenProto blockTokens = 3;
private java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> blockTokens_ =
java.util.Collections.emptyList();
private void ensureBlockTokensIsMutable() {
if (!((bitField0_ & 0x00000004) == 0x00000004)) {
blockTokens_ = new java.util.ArrayList<org.apache.hadoop.security.proto.SecurityProtos.TokenProto>(blockTokens_);
bitField0_ |= 0x00000004;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> blockTokensBuilder_;
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*
*
* each internal block has a block token
*
*/
public java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> getBlockTokensList() {
if (blockTokensBuilder_ == null) {
return java.util.Collections.unmodifiableList(blockTokens_);
} else {
return blockTokensBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*
*
* each internal block has a block token
*
*/
public int getBlockTokensCount() {
if (blockTokensBuilder_ == null) {
return blockTokens_.size();
} else {
return blockTokensBuilder_.getCount();
}
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*
*
* each internal block has a block token
*
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockTokens(int index) {
if (blockTokensBuilder_ == null) {
return blockTokens_.get(index);
} else {
return blockTokensBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*
*
* each internal block has a block token
*
*/
public Builder setBlockTokens(
int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
if (blockTokensBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlockTokensIsMutable();
blockTokens_.set(index, value);
onChanged();
} else {
blockTokensBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*
*
* each internal block has a block token
*
*/
public Builder setBlockTokens(
int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) {
if (blockTokensBuilder_ == null) {
ensureBlockTokensIsMutable();
blockTokens_.set(index, builderForValue.build());
onChanged();
} else {
blockTokensBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*
*
* each internal block has a block token
*
*/
public Builder addBlockTokens(org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
if (blockTokensBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlockTokensIsMutable();
blockTokens_.add(value);
onChanged();
} else {
blockTokensBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*
*
* each internal block has a block token
*
*/
public Builder addBlockTokens(
int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
if (blockTokensBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBlockTokensIsMutable();
blockTokens_.add(index, value);
onChanged();
} else {
blockTokensBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*
*
* each internal block has a block token
*
*/
public Builder addBlockTokens(
org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) {
if (blockTokensBuilder_ == null) {
ensureBlockTokensIsMutable();
blockTokens_.add(builderForValue.build());
onChanged();
} else {
blockTokensBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*
*
* each internal block has a block token
*
*/
public Builder addBlockTokens(
int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) {
if (blockTokensBuilder_ == null) {
ensureBlockTokensIsMutable();
blockTokens_.add(index, builderForValue.build());
onChanged();
} else {
blockTokensBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*
*
* each internal block has a block token
*
*/
public Builder addAllBlockTokens(
java.lang.Iterable<? extends org.apache.hadoop.security.proto.SecurityProtos.TokenProto> values) {
if (blockTokensBuilder_ == null) {
ensureBlockTokensIsMutable();
super.addAll(values, blockTokens_);
onChanged();
} else {
blockTokensBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*
*
* each internal block has a block token
*
*/
public Builder clearBlockTokens() {
if (blockTokensBuilder_ == null) {
blockTokens_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
} else {
blockTokensBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*
*
* each internal block has a block token
*
*/
public Builder removeBlockTokens(int index) {
if (blockTokensBuilder_ == null) {
ensureBlockTokensIsMutable();
blockTokens_.remove(index);
onChanged();
} else {
blockTokensBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*
*
* each internal block has a block token
*
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder getBlockTokensBuilder(
int index) {
return getBlockTokensFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*
*
* each internal block has a block token
*
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokensOrBuilder(
int index) {
if (blockTokensBuilder_ == null) {
return blockTokens_.get(index); } else {
return blockTokensBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*
*
* each internal block has a block token
*
*/
public java.util.List<? extends org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>
getBlockTokensOrBuilderList() {
if (blockTokensBuilder_ != null) {
return blockTokensBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(blockTokens_);
}
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*
*
* each internal block has a block token
*
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder addBlockTokensBuilder() {
return getBlockTokensFieldBuilder().addBuilder(
org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance());
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*
*
* each internal block has a block token
*
*/
public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder addBlockTokensBuilder(
int index) {
return getBlockTokensFieldBuilder().addBuilder(
index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance());
}
/**
* repeated .hadoop.common.TokenProto blockTokens = 3;
*
*
* each internal block has a block token
*
*/
public java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder>
getBlockTokensBuilderList() {
return getBlockTokensFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>
getBlockTokensFieldBuilder() {
if (blockTokensBuilder_ == null) {
blockTokensBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>(
blockTokens_,
((bitField0_ & 0x00000004) == 0x00000004),
getParentForChildren(),
isClean());
blockTokens_ = null;
}
return blockTokensBuilder_;
}
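// The RepeatedFieldBuilder above is created lazily: until a builder-based
// accessor is first used, the plain blockTokens_ list holds the state;
// afterwards the list is handed to the builder (and nulled here) so the
// repeated field's state has a single owner.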
// required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto ecPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> ecPolicyBuilder_;
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public boolean hasEcPolicy() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy() {
if (ecPolicyBuilder_ == null) {
return ecPolicy_;
} else {
return ecPolicyBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public Builder setEcPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) {
if (ecPolicyBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ecPolicy_ = value;
onChanged();
} else {
ecPolicyBuilder_.setMessage(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public Builder setEcPolicy(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder builderForValue) {
if (ecPolicyBuilder_ == null) {
ecPolicy_ = builderForValue.build();
onChanged();
} else {
ecPolicyBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000008;
return this;
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public Builder mergeEcPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) {
if (ecPolicyBuilder_ == null) {
if (((bitField0_ & 0x00000008) == 0x00000008) &&
ecPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance()) {
ecPolicy_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.newBuilder(ecPolicy_).mergeFrom(value).buildPartial();
} else {
ecPolicy_ = value;
}
onChanged();
} else {
ecPolicyBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public Builder clearEcPolicy() {
if (ecPolicyBuilder_ == null) {
ecPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance();
onChanged();
} else {
ecPolicyBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder getEcPolicyBuilder() {
bitField0_ |= 0x00000008;
onChanged();
return getEcPolicyFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder() {
if (ecPolicyBuilder_ != null) {
return ecPolicyBuilder_.getMessageOrBuilder();
} else {
return ecPolicy_;
}
}
/**
* required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder>
getEcPolicyFieldBuilder() {
if (ecPolicyBuilder_ == null) {
ecPolicyBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder>(
ecPolicy_,
getParentForChildren(),
isClean());
ecPolicy_ = null;
}
return ecPolicyBuilder_;
}
// repeated uint32 blockIndices = 5;
private java.util.List<java.lang.Integer> blockIndices_ = java.util.Collections.emptyList();
private void ensureBlockIndicesIsMutable() {
if (!((bitField0_ & 0x00000010) == 0x00000010)) {
blockIndices_ = new java.util.ArrayList<java.lang.Integer>(blockIndices_);
bitField0_ |= 0x00000010;
}
}
/**
* repeated uint32 blockIndices = 5;
*/
public java.util.List<java.lang.Integer>
getBlockIndicesList() {
return java.util.Collections.unmodifiableList(blockIndices_);
}
/**
* repeated uint32 blockIndices = 5;
*/
public int getBlockIndicesCount() {
return blockIndices_.size();
}
/**
* repeated uint32 blockIndices = 5;
*/
public int getBlockIndices(int index) {
return blockIndices_.get(index);
}
/**
* repeated uint32 blockIndices = 5;
*/
public Builder setBlockIndices(
int index, int value) {
ensureBlockIndicesIsMutable();
blockIndices_.set(index, value);
onChanged();
return this;
}
/**
* repeated uint32 blockIndices = 5;
*/
public Builder addBlockIndices(int value) {
ensureBlockIndicesIsMutable();
blockIndices_.add(value);
onChanged();
return this;
}
/**
* repeated uint32 blockIndices = 5;
*/
public Builder addAllBlockIndices(
java.lang.Iterable<? extends java.lang.Integer> values) {
ensureBlockIndicesIsMutable();
super.addAll(values, blockIndices_);
onChanged();
return this;
}
/**
* repeated uint32 blockIndices = 5;
*/
public Builder clearBlockIndices() {
blockIndices_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000010);
onChanged();
return this;
}
// required uint64 requestedNumBytes = 6;
private long requestedNumBytes_ ;
/**
* required uint64 requestedNumBytes = 6;
*/
public boolean hasRequestedNumBytes() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* required uint64 requestedNumBytes = 6;
*/
public long getRequestedNumBytes() {
return requestedNumBytes_;
}
/**
* required uint64 requestedNumBytes = 6;
*/
public Builder setRequestedNumBytes(long value) {
bitField0_ |= 0x00000020;
requestedNumBytes_ = value;
onChanged();
return this;
}
/**
* required uint64 requestedNumBytes = 6;
*/
public Builder clearRequestedNumBytes() {
bitField0_ = (bitField0_ & ~0x00000020);
requestedNumBytes_ = 0L;
onChanged();
return this;
}
// optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto blockChecksumOptions_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder> blockChecksumOptionsBuilder_;
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
public boolean hasBlockChecksumOptions() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions() {
if (blockChecksumOptionsBuilder_ == null) {
return blockChecksumOptions_;
} else {
return blockChecksumOptionsBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
public Builder setBlockChecksumOptions(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto value) {
if (blockChecksumOptionsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
blockChecksumOptions_ = value;
onChanged();
} else {
blockChecksumOptionsBuilder_.setMessage(value);
}
bitField0_ |= 0x00000040;
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
public Builder setBlockChecksumOptions(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder builderForValue) {
if (blockChecksumOptionsBuilder_ == null) {
blockChecksumOptions_ = builderForValue.build();
onChanged();
} else {
blockChecksumOptionsBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000040;
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
public Builder mergeBlockChecksumOptions(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto value) {
if (blockChecksumOptionsBuilder_ == null) {
if (((bitField0_ & 0x00000040) == 0x00000040) &&
blockChecksumOptions_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance()) {
blockChecksumOptions_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.newBuilder(blockChecksumOptions_).mergeFrom(value).buildPartial();
} else {
blockChecksumOptions_ = value;
}
onChanged();
} else {
blockChecksumOptionsBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000040;
return this;
}
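// Usage sketch (illustrative): set-vs-merge for this optional message
// field. setBlockChecksumOptions(...) replaces the value wholesale, while
// mergeBlockChecksumOptions(...) overlays only the fields set on its
// argument onto any value already present. "defaults" and "override" are
// hypothetical pre-built BlockChecksumOptionsProto instances.
//
//   OpBlockGroupChecksumProto.Builder b = OpBlockGroupChecksumProto.newBuilder();
//   b.setBlockChecksumOptions(defaults);
//   b.mergeBlockChecksumOptions(override);   // keeps defaults' other fields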
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
public Builder clearBlockChecksumOptions() {
if (blockChecksumOptionsBuilder_ == null) {
blockChecksumOptions_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance();
onChanged();
} else {
blockChecksumOptionsBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000040);
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder getBlockChecksumOptionsBuilder() {
bitField0_ |= 0x00000040;
onChanged();
return getBlockChecksumOptionsFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder() {
if (blockChecksumOptionsBuilder_ != null) {
return blockChecksumOptionsBuilder_.getMessageOrBuilder();
} else {
return blockChecksumOptions_;
}
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder>
getBlockChecksumOptionsFieldBuilder() {
if (blockChecksumOptionsBuilder_ == null) {
blockChecksumOptionsBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder>(
blockChecksumOptions_,
getParentForChildren(),
isClean());
blockChecksumOptions_ = null;
}
return blockChecksumOptionsBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpBlockGroupChecksumProto)
}
static {
defaultInstance = new OpBlockGroupChecksumProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.OpBlockGroupChecksumProto)
}
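// Usage sketch (illustrative): assembling the fields of
// OpBlockGroupChecksumProto shown above. buildPartial() is used so the
// sketch need not populate the message's remaining required fields, which
// are defined earlier in this file; build() would throw until they are set.
//
//   OpBlockGroupChecksumProto req = OpBlockGroupChecksumProto.newBuilder()
//       .addBlockIndices(0)
//       .addBlockIndices(1)
//       .setRequestedNumBytes(64L * 1024 * 1024)   // checksum the first 64 MB
//       .buildPartial();                           // skips the isInitialized() check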
public interface ShortCircuitShmIdProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required int64 hi = 1;
/**
* required int64 hi = 1;
*/
boolean hasHi();
/**
* required int64 hi = 1;
*/
long getHi();
// required int64 lo = 2;
/**
* required int64 lo = 2;
*/
boolean hasLo();
/**
* required int64 lo = 2;
*/
long getLo();
}
/**
* Protobuf type {@code hadoop.hdfs.ShortCircuitShmIdProto}
*
*
**
* An ID uniquely identifying a shared memory segment.
*
*/
public static final class ShortCircuitShmIdProto extends
com.google.protobuf.GeneratedMessage
implements ShortCircuitShmIdProtoOrBuilder {
// Use ShortCircuitShmIdProto.newBuilder() to construct.
private ShortCircuitShmIdProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ShortCircuitShmIdProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ShortCircuitShmIdProto defaultInstance;
public static ShortCircuitShmIdProto getDefaultInstance() {
return defaultInstance;
}
public ShortCircuitShmIdProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ShortCircuitShmIdProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
hi_ = input.readInt64();
break;
}
case 16: {
bitField0_ |= 0x00000002;
lo_ = input.readInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmIdProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmIdProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder.class);
}
public static com.google.protobuf.Parser<ShortCircuitShmIdProto> PARSER =
new com.google.protobuf.AbstractParser<ShortCircuitShmIdProto>() {
public ShortCircuitShmIdProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ShortCircuitShmIdProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<ShortCircuitShmIdProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required int64 hi = 1;
public static final int HI_FIELD_NUMBER = 1;
private long hi_;
/**
* required int64 hi = 1;
*/
public boolean hasHi() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required int64 hi = 1;
*/
public long getHi() {
return hi_;
}
// required int64 lo = 2;
public static final int LO_FIELD_NUMBER = 2;
private long lo_;
/**
* required int64 lo = 2;
*/
public boolean hasLo() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required int64 lo = 2;
*/
public long getLo() {
return lo_;
}
private void initFields() {
hi_ = 0L;
lo_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasHi()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasLo()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeInt64(1, hi_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeInt64(2, lo_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(1, hi_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(2, lo_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto) obj;
boolean result = true;
result = result && (hasHi() == other.hasHi());
if (hasHi()) {
result = result && (getHi()
== other.getHi());
}
result = result && (hasLo() == other.hasLo());
if (hasLo()) {
result = result && (getLo()
== other.getLo());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasHi()) {
hash = (37 * hash) + HI_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getHi());
}
if (hasLo()) {
hash = (37 * hash) + LO_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getLo());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ShortCircuitShmIdProto}
*
*
**
* An ID uniquely identifying a shared memory segment.
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmIdProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmIdProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
hi_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
lo_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmIdProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.hi_ = hi_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.lo_ = lo_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance()) return this;
if (other.hasHi()) {
setHi(other.getHi());
}
if (other.hasLo()) {
setLo(other.getLo());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasHi()) {
return false;
}
if (!hasLo()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required int64 hi = 1;
private long hi_ ;
/**
* required int64 hi = 1;
*/
public boolean hasHi() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required int64 hi = 1;
*/
public long getHi() {
return hi_;
}
/**
* required int64 hi = 1;
*/
public Builder setHi(long value) {
bitField0_ |= 0x00000001;
hi_ = value;
onChanged();
return this;
}
/**
* required int64 hi = 1;
*/
public Builder clearHi() {
bitField0_ = (bitField0_ & ~0x00000001);
hi_ = 0L;
onChanged();
return this;
}
// required int64 lo = 2;
private long lo_ ;
/**
* required int64 lo = 2;
*/
public boolean hasLo() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required int64 lo = 2;
*/
public long getLo() {
return lo_;
}
/**
* required int64 lo = 2;
*/
public Builder setLo(long value) {
bitField0_ |= 0x00000002;
lo_ = value;
onChanged();
return this;
}
/**
* required int64 lo = 2;
*/
public Builder clearLo() {
bitField0_ = (bitField0_ & ~0x00000002);
lo_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ShortCircuitShmIdProto)
}
static {
defaultInstance = new ShortCircuitShmIdProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ShortCircuitShmIdProto)
}
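// Usage sketch (illustrative): constructing a ShortCircuitShmIdProto and
// round-tripping it through its wire format. The 128-bit segment ID is
// carried as the two required int64 halves; build() throws if either is
// unset, per isInitialized() above.
//
//   ShortCircuitShmIdProto shmId = ShortCircuitShmIdProto.newBuilder()
//       .setHi(0x0123456789abcdefL)    // upper 64 bits
//       .setLo(0x0fedcba987654321L)    // lower 64 bits
//       .build();
//   byte[] wire = shmId.toByteArray();
//   ShortCircuitShmIdProto parsed = ShortCircuitShmIdProto.parseFrom(wire);
//   assert parsed.equals(shmId) && parsed.hashCode() == shmId.hashCode();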
public interface ShortCircuitShmSlotProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
boolean hasShmId();
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getShmId();
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder getShmIdOrBuilder();
// required int32 slotIdx = 2;
/**
* required int32 slotIdx = 2;
*/
boolean hasSlotIdx();
/**
* required int32 slotIdx = 2;
*/
int getSlotIdx();
}
/**
* Protobuf type {@code hadoop.hdfs.ShortCircuitShmSlotProto}
*
*
**
* An ID uniquely identifying a slot within a shared memory segment.
*
*/
public static final class ShortCircuitShmSlotProto extends
com.google.protobuf.GeneratedMessage
implements ShortCircuitShmSlotProtoOrBuilder {
// Use ShortCircuitShmSlotProto.newBuilder() to construct.
private ShortCircuitShmSlotProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ShortCircuitShmSlotProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ShortCircuitShmSlotProto defaultInstance;
public static ShortCircuitShmSlotProto getDefaultInstance() {
return defaultInstance;
}
public ShortCircuitShmSlotProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ShortCircuitShmSlotProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = shmId_.toBuilder();
}
shmId_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(shmId_);
shmId_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 16: {
bitField0_ |= 0x00000002;
slotIdx_ = input.readInt32();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder.class);
}
public static com.google.protobuf.Parser<ShortCircuitShmSlotProto> PARSER =
new com.google.protobuf.AbstractParser<ShortCircuitShmSlotProto>() {
public ShortCircuitShmSlotProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ShortCircuitShmSlotProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<ShortCircuitShmSlotProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
public static final int SHMID_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto shmId_;
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
public boolean hasShmId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getShmId() {
return shmId_;
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder getShmIdOrBuilder() {
return shmId_;
}
// required int32 slotIdx = 2;
public static final int SLOTIDX_FIELD_NUMBER = 2;
private int slotIdx_;
/**
* required int32 slotIdx = 2;
*/
public boolean hasSlotIdx() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required int32 slotIdx = 2;
*/
public int getSlotIdx() {
return slotIdx_;
}
private void initFields() {
shmId_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance();
slotIdx_ = 0;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasShmId()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasSlotIdx()) {
memoizedIsInitialized = 0;
return false;
}
if (!getShmId().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, shmId_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeInt32(2, slotIdx_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, shmId_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(2, slotIdx_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto) obj;
boolean result = true;
result = result && (hasShmId() == other.hasShmId());
if (hasShmId()) {
result = result && getShmId()
.equals(other.getShmId());
}
result = result && (hasSlotIdx() == other.hasSlotIdx());
if (hasSlotIdx()) {
result = result && (getSlotIdx()
== other.getSlotIdx());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasShmId()) {
hash = (37 * hash) + SHMID_FIELD_NUMBER;
hash = (53 * hash) + getShmId().hashCode();
}
if (hasSlotIdx()) {
hash = (37 * hash) + SLOTIDX_FIELD_NUMBER;
hash = (53 * hash) + getSlotIdx();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ShortCircuitShmSlotProto}
*
*
**
* An ID uniquely identifying a slot within a shared memory segment.
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getShmIdFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (shmIdBuilder_ == null) {
shmId_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance();
} else {
shmIdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
slotIdx_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (shmIdBuilder_ == null) {
result.shmId_ = shmId_;
} else {
result.shmId_ = shmIdBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.slotIdx_ = slotIdx_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance()) return this;
if (other.hasShmId()) {
mergeShmId(other.getShmId());
}
if (other.hasSlotIdx()) {
setSlotIdx(other.getSlotIdx());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasShmId()) {
return false;
}
if (!hasSlotIdx()) {
return false;
}
if (!getShmId().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto shmId_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder> shmIdBuilder_;
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
public boolean hasShmId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getShmId() {
if (shmIdBuilder_ == null) {
return shmId_;
} else {
return shmIdBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
public Builder setShmId(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto value) {
if (shmIdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
shmId_ = value;
onChanged();
} else {
shmIdBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
public Builder setShmId(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder builderForValue) {
if (shmIdBuilder_ == null) {
shmId_ = builderForValue.build();
onChanged();
} else {
shmIdBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
public Builder mergeShmId(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto value) {
if (shmIdBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
shmId_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance()) {
shmId_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.newBuilder(shmId_).mergeFrom(value).buildPartial();
} else {
shmId_ = value;
}
onChanged();
} else {
shmIdBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
public Builder clearShmId() {
if (shmIdBuilder_ == null) {
shmId_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance();
onChanged();
} else {
shmIdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder getShmIdBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getShmIdFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder getShmIdOrBuilder() {
if (shmIdBuilder_ != null) {
return shmIdBuilder_.getMessageOrBuilder();
} else {
return shmId_;
}
}
/**
* required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder>
getShmIdFieldBuilder() {
if (shmIdBuilder_ == null) {
shmIdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder>(
shmId_,
getParentForChildren(),
isClean());
shmId_ = null;
}
return shmIdBuilder_;
}
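// Usage sketch (illustrative): the SingleFieldBuilder above is created
// lazily. getShmIdBuilder() forces it and marks the field present, so the
// nested message can be edited in place instead of rebuilt via setShmId().
//
//   ShortCircuitShmSlotProto.Builder slot = ShortCircuitShmSlotProto.newBuilder();
//   slot.getShmIdBuilder().setHi(1L).setLo(2L);   // edit nested shmId in place
//   slot.setSlotIdx(0);
//   ShortCircuitShmSlotProto msg = slot.build();  // all required fields set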
// required int32 slotIdx = 2;
private int slotIdx_ ;
/**
* required int32 slotIdx = 2;
*/
public boolean hasSlotIdx() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required int32 slotIdx = 2;
*/
public int getSlotIdx() {
return slotIdx_;
}
/**
* required int32 slotIdx = 2;
*/
public Builder setSlotIdx(int value) {
bitField0_ |= 0x00000002;
slotIdx_ = value;
onChanged();
return this;
}
/**
* required int32 slotIdx = 2;
*/
public Builder clearSlotIdx() {
bitField0_ = (bitField0_ & ~0x00000002);
slotIdx_ = 0;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ShortCircuitShmSlotProto)
}
static {
defaultInstance = new ShortCircuitShmSlotProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ShortCircuitShmSlotProto)
}
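// Usage sketch (illustrative): length-delimited framing lets several slot
// IDs share one stream; each writeDelimitedTo() call prefixes the message
// with its varint length, and parseDelimitedFrom() (generated above)
// consumes exactly one such frame per call. slotA and slotB are assumed
// to be fully built ShortCircuitShmSlotProto instances.
//
//   java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
//   slotA.writeDelimitedTo(out);
//   slotB.writeDelimitedTo(out);
//   java.io.InputStream in = new java.io.ByteArrayInputStream(out.toByteArray());
//   ShortCircuitShmSlotProto first = ShortCircuitShmSlotProto.parseDelimitedFrom(in);
//   ShortCircuitShmSlotProto second = ShortCircuitShmSlotProto.parseDelimitedFrom(in);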
public interface OpRequestShortCircuitAccessProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.BaseHeaderProto header = 1;
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
boolean hasHeader();
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader();
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder();
// required uint32 maxVersion = 2;
/**
* required uint32 maxVersion = 2;
*
*
** In order to get short-circuit access to block data, clients must set this
* to the highest version of the block data that they can understand.
* Currently 1 is the only version, but more versions may exist in the future
* if the on-disk format changes.
*
*/
boolean hasMaxVersion();
/**
* required uint32 maxVersion = 2;
*
*
** In order to get short-circuit access to block data, clients must set this
* to the highest version of the block data that they can understand.
* Currently 1 is the only version, but more versions may exist in the future
* if the on-disk format changes.
*
*/
int getMaxVersion();
// optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
/**
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*
*
**
* The shared memory slot to use, if we are using one.
*
*/
boolean hasSlotId();
/**
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*
*
**
* The shared memory slot to use, if we are using one.
*
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getSlotId();
/**
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*
*
**
* The shared memory slot to use, if we are using one.
*
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder getSlotIdOrBuilder();
// optional bool supportsReceiptVerification = 4 [default = false];
/**
* optional bool supportsReceiptVerification = 4 [default = false];
*
*
**
* True if the client supports verifying that the file descriptor has been
* sent successfully.
*
*/
boolean hasSupportsReceiptVerification();
/**
* optional bool supportsReceiptVerification = 4 [default = false];
*
*
**
* True if the client supports verifying that the file descriptor has been
* sent successfully.
*
*/
boolean getSupportsReceiptVerification();
}
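// Usage sketch (illustrative): per the field comments above, a client
// requesting short-circuit access advertises the highest block-data
// format version it understands (currently 1) and may attach a shared
// memory slot and ask for receipt verification. The required header
// field, defined earlier in this file, must also be set before build()
// succeeds.
//
//   OpRequestShortCircuitAccessProto.Builder req =
//       OpRequestShortCircuitAccessProto.newBuilder()
//           .setMaxVersion(1)                        // only version defined today
//           .setSupportsReceiptVerification(true);   // client can verify fd receipt
//   // req.setSlotId(slot) would attach a ShortCircuitShmSlotProto when used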
/**
* Protobuf type {@code hadoop.hdfs.OpRequestShortCircuitAccessProto}
*/
public static final class OpRequestShortCircuitAccessProto extends
com.google.protobuf.GeneratedMessage
implements OpRequestShortCircuitAccessProtoOrBuilder {
// Use OpRequestShortCircuitAccessProto.newBuilder() to construct.
private OpRequestShortCircuitAccessProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private OpRequestShortCircuitAccessProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final OpRequestShortCircuitAccessProto defaultInstance;
public static OpRequestShortCircuitAccessProto getDefaultInstance() {
return defaultInstance;
}
public OpRequestShortCircuitAccessProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private OpRequestShortCircuitAccessProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = header_.toBuilder();
}
header_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(header_);
header_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 16: {
bitField0_ |= 0x00000002;
maxVersion_ = input.readUInt32();
break;
}
case 26: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) == 0x00000004)) {
subBuilder = slotId_.toBuilder();
}
slotId_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(slotId_);
slotId_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
case 32: {
bitField0_ |= 0x00000008;
supportsReceiptVerification_ = input.readBool();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto.Builder.class);
}
public static com.google.protobuf.Parser<OpRequestShortCircuitAccessProto> PARSER =
new com.google.protobuf.AbstractParser<OpRequestShortCircuitAccessProto>() {
public OpRequestShortCircuitAccessProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new OpRequestShortCircuitAccessProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<OpRequestShortCircuitAccessProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.BaseHeaderProto header = 1;
public static final int HEADER_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
return header_;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
return header_;
}
// required uint32 maxVersion = 2;
public static final int MAXVERSION_FIELD_NUMBER = 2;
private int maxVersion_;
/**
* required uint32 maxVersion = 2;
*
*
** In order to get short-circuit access to block data, clients must set this
* to the highest version of the block data that they can understand.
* Currently 1 is the only version, but more versions may exist in the future
* if the on-disk format changes.
*
*/
public boolean hasMaxVersion() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint32 maxVersion = 2;
*
*
** In order to get short-circuit access to block data, clients must set this
* to the highest version of the block data that they can understand.
* Currently 1 is the only version, but more versions may exist in the future
* if the on-disk format changes.
*
*/
public int getMaxVersion() {
return maxVersion_;
}
// optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
public static final int SLOTID_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto slotId_;
/**
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*
*
**
* The shared memory slot to use, if we are using one.
*
*/
public boolean hasSlotId() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*
*
**
* The shared memory slot to use, if we are using one.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getSlotId() {
return slotId_;
}
/**
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*
*
**
* The shared memory slot to use, if we are using one.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder getSlotIdOrBuilder() {
return slotId_;
}
// optional bool supportsReceiptVerification = 4 [default = false];
public static final int SUPPORTSRECEIPTVERIFICATION_FIELD_NUMBER = 4;
private boolean supportsReceiptVerification_;
/**
* optional bool supportsReceiptVerification = 4 [default = false];
*
*
**
* True if the client supports verifying that the file descriptor has been
* sent successfully.
*
*/
public boolean hasSupportsReceiptVerification() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional bool supportsReceiptVerification = 4 [default = false];
*
*
**
* True if the client supports verifying that the file descriptor has been
* sent successfully.
*
*/
public boolean getSupportsReceiptVerification() {
return supportsReceiptVerification_;
}
private void initFields() {
header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
maxVersion_ = 0;
slotId_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance();
supportsReceiptVerification_ = false;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasHeader()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasMaxVersion()) {
memoizedIsInitialized = 0;
return false;
}
if (!getHeader().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
if (hasSlotId()) {
if (!getSlotId().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, header_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt32(2, maxVersion_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeMessage(3, slotId_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeBool(4, supportsReceiptVerification_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, header_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(2, maxVersion_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, slotId_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(4, supportsReceiptVerification_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto) obj;
boolean result = true;
result = result && (hasHeader() == other.hasHeader());
if (hasHeader()) {
result = result && getHeader()
.equals(other.getHeader());
}
result = result && (hasMaxVersion() == other.hasMaxVersion());
if (hasMaxVersion()) {
result = result && (getMaxVersion()
== other.getMaxVersion());
}
result = result && (hasSlotId() == other.hasSlotId());
if (hasSlotId()) {
result = result && getSlotId()
.equals(other.getSlotId());
}
result = result && (hasSupportsReceiptVerification() == other.hasSupportsReceiptVerification());
if (hasSupportsReceiptVerification()) {
result = result && (getSupportsReceiptVerification()
== other.getSupportsReceiptVerification());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasHeader()) {
hash = (37 * hash) + HEADER_FIELD_NUMBER;
hash = (53 * hash) + getHeader().hashCode();
}
if (hasMaxVersion()) {
hash = (37 * hash) + MAXVERSION_FIELD_NUMBER;
hash = (53 * hash) + getMaxVersion();
}
if (hasSlotId()) {
hash = (37 * hash) + SLOTID_FIELD_NUMBER;
hash = (53 * hash) + getSlotId().hashCode();
}
if (hasSupportsReceiptVerification()) {
hash = (37 * hash) + SUPPORTSRECEIPTVERIFICATION_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getSupportsReceiptVerification());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
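// Usage sketch (added for illustration, not generated code): the parseFrom
// overloads above read a single unframed message, while parseDelimitedFrom
// expects a varint length prefix, so each must be paired with the matching
// write call. Assuming an OutputStream `out` and an InputStream `in`:
//
//   request.writeDelimitedTo(out);   // length-prefixed frame
//   OpRequestShortCircuitAccessProto echoed =
//       OpRequestShortCircuitAccessProto.parseDelimitedFrom(in);
//
//   request.writeTo(out);            // raw bytes; only safe when the stream
//                                    // carries exactly one message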
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.OpRequestShortCircuitAccessProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getHeaderFieldBuilder();
getSlotIdFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (headerBuilder_ == null) {
header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
maxVersion_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
if (slotIdBuilder_ == null) {
slotId_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance();
} else {
slotIdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
supportsReceiptVerification_ = false;
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (headerBuilder_ == null) {
result.header_ = header_;
} else {
result.header_ = headerBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.maxVersion_ = maxVersion_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
if (slotIdBuilder_ == null) {
result.slotId_ = slotId_;
} else {
result.slotId_ = slotIdBuilder_.build();
}
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.supportsReceiptVerification_ = supportsReceiptVerification_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto.getDefaultInstance()) return this;
if (other.hasHeader()) {
mergeHeader(other.getHeader());
}
if (other.hasMaxVersion()) {
setMaxVersion(other.getMaxVersion());
}
if (other.hasSlotId()) {
mergeSlotId(other.getSlotId());
}
if (other.hasSupportsReceiptVerification()) {
setSupportsReceiptVerification(other.getSupportsReceiptVerification());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasHeader()) {
return false;
}
if (!hasMaxVersion()) {
return false;
}
if (!getHeader().isInitialized()) {
return false;
}
if (hasSlotId()) {
if (!getSlotId().isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
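// Note (added, not generated): on a parse failure above, the partially read
// message is still merged in the finally block before the exception
// propagates, so the builder keeps whatever fields were successfully decoded.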
private int bitField0_;
// required .hadoop.hdfs.BaseHeaderProto header = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> headerBuilder_;
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public boolean hasHeader() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
if (headerBuilder_ == null) {
return header_;
} else {
return headerBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
if (headerBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
header_ = value;
onChanged();
} else {
headerBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder setHeader(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder builderForValue) {
if (headerBuilder_ == null) {
header_ = builderForValue.build();
onChanged();
} else {
headerBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
if (headerBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) {
header_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial();
} else {
header_ = value;
}
onChanged();
} else {
headerBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public Builder clearHeader() {
if (headerBuilder_ == null) {
header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
onChanged();
} else {
headerBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder getHeaderBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getHeaderFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
if (headerBuilder_ != null) {
return headerBuilder_.getMessageOrBuilder();
} else {
return header_;
}
}
/**
* required .hadoop.hdfs.BaseHeaderProto header = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>
getHeaderFieldBuilder() {
if (headerBuilder_ == null) {
headerBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>(
header_,
getParentForChildren(),
isClean());
header_ = null;
}
return headerBuilder_;
}
// required uint32 maxVersion = 2;
private int maxVersion_ ;
/**
* required uint32 maxVersion = 2;
*
*
** In order to get short-circuit access to block data, clients must set this
* to the highest version of the block data that they can understand.
* Currently 1 is the only version, but more versions may exist in the future
* if the on-disk format changes.
*
*/
public boolean hasMaxVersion() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint32 maxVersion = 2;
*
*
** In order to get short-circuit access to block data, clients must set this
* to the highest version of the block data that they can understand.
* Currently 1 is the only version, but more versions may exist in the future
* if the on-disk format changes.
*
*/
public int getMaxVersion() {
return maxVersion_;
}
/**
* required uint32 maxVersion = 2;
*
*
** In order to get short-circuit access to block data, clients must set this
* to the highest version of the block data that they can understand.
* Currently 1 is the only version, but more versions may exist in the future
* if the on-disk format changes.
*
*/
public Builder setMaxVersion(int value) {
bitField0_ |= 0x00000002;
maxVersion_ = value;
onChanged();
return this;
}
/**
* required uint32 maxVersion = 2;
*
*
** In order to get short-circuit access to block data, clients must set this
* to the highest version of the block data that they can understand.
* Currently 1 is the only version, but more versions may exist in the future
* if the on-disk format changes.
*
*/
public Builder clearMaxVersion() {
bitField0_ = (bitField0_ & ~0x00000002);
maxVersion_ = 0;
onChanged();
return this;
}
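// Illustrative note (added, not generated): per the field comment above,
// version 1 is currently the only block data format, so a present-day client
// would typically call builder.setMaxVersion(1).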
// optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto slotId_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder> slotIdBuilder_;
/**
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*
*
**
* The shared memory slot to use, if we are using one.
*
*/
public boolean hasSlotId() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*
*
**
* The shared memory slot to use, if we are using one.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getSlotId() {
if (slotIdBuilder_ == null) {
return slotId_;
} else {
return slotIdBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*
*
**
* The shared memory slot to use, if we are using one.
*
*/
public Builder setSlotId(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto value) {
if (slotIdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
slotId_ = value;
onChanged();
} else {
slotIdBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*
*
**
* The shared memory slot to use, if we are using one.
*
*/
public Builder setSlotId(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder builderForValue) {
if (slotIdBuilder_ == null) {
slotId_ = builderForValue.build();
onChanged();
} else {
slotIdBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*
*
**
* The shared memory slot to use, if we are using one.
*
*/
public Builder mergeSlotId(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto value) {
if (slotIdBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004) &&
slotId_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance()) {
slotId_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.newBuilder(slotId_).mergeFrom(value).buildPartial();
} else {
slotId_ = value;
}
onChanged();
} else {
slotIdBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*
*
**
* The shared memory slot to use, if we are using one.
*
*/
public Builder clearSlotId() {
if (slotIdBuilder_ == null) {
slotId_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance();
onChanged();
} else {
slotIdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
/**
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*
*
**
* The shared memory slot to use, if we are using one.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder getSlotIdBuilder() {
bitField0_ |= 0x00000004;
onChanged();
return getSlotIdFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*
*
**
* The shared memory slot to use, if we are using one.
*
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder getSlotIdOrBuilder() {
if (slotIdBuilder_ != null) {
return slotIdBuilder_.getMessageOrBuilder();
} else {
return slotId_;
}
}
/**
* optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;
*
*
**
* The shared memory slot to use, if we are using one.
*
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder>
getSlotIdFieldBuilder() {
if (slotIdBuilder_ == null) {
slotIdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder>(
slotId_,
getParentForChildren(),
isClean());
slotId_ = null;
}
return slotIdBuilder_;
}
// optional bool supportsReceiptVerification = 4 [default = false];
private boolean supportsReceiptVerification_ ;
/**
* optional bool supportsReceiptVerification = 4 [default = false];
*
*
**
* True if the client supports verifying that the file descriptor has been
* sent successfully.
*
*/
public boolean hasSupportsReceiptVerification() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional bool supportsReceiptVerification = 4 [default = false];
*
*
**
* True if the client supports verifying that the file descriptor has been
* sent successfully.
*
*/
public boolean getSupportsReceiptVerification() {
return supportsReceiptVerification_;
}
/**
* optional bool supportsReceiptVerification = 4 [default = false];
*
*
**
* True if the client supports verifying that the file descriptor has been
* sent successfully.
*
*/
public Builder setSupportsReceiptVerification(boolean value) {
bitField0_ |= 0x00000008;
supportsReceiptVerification_ = value;
onChanged();
return this;
}
/**
* optional bool supportsReceiptVerification = 4 [default = false];
*
*
**
* True if the client supports verifying that the file descriptor has been
* sent successfully.
*
*/
public Builder clearSupportsReceiptVerification() {
bitField0_ = (bitField0_ & ~0x00000008);
supportsReceiptVerification_ = false;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpRequestShortCircuitAccessProto)
}
static {
defaultInstance = new OpRequestShortCircuitAccessProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.OpRequestShortCircuitAccessProto)
}
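// Usage sketch (added for illustration; `header` is an assumed, pre-built
// BaseHeaderProto): header and maxVersion are required, so build() throws
// an UninitializedMessageException if either is missing, while buildPartial()
// skips that check.
//
//   OpRequestShortCircuitAccessProto request =
//       OpRequestShortCircuitAccessProto.newBuilder()
//           .setHeader(header)
//           .setMaxVersion(1)
//           .setSupportsReceiptVerification(true)
//           .build();
//   byte[] wire = request.toByteArray();
//   OpRequestShortCircuitAccessProto parsed =
//       OpRequestShortCircuitAccessProto.parseFrom(wire);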
public interface ReleaseShortCircuitAccessRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
boolean hasSlotId();
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getSlotId();
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder getSlotIdOrBuilder();
// optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
boolean hasTraceInfo();
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getTraceInfo();
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder getTraceInfoOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.ReleaseShortCircuitAccessRequestProto}
*/
public static final class ReleaseShortCircuitAccessRequestProto extends
com.google.protobuf.GeneratedMessage
implements ReleaseShortCircuitAccessRequestProtoOrBuilder {
// Use ReleaseShortCircuitAccessRequestProto.newBuilder() to construct.
private ReleaseShortCircuitAccessRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ReleaseShortCircuitAccessRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ReleaseShortCircuitAccessRequestProto defaultInstance;
public static ReleaseShortCircuitAccessRequestProto getDefaultInstance() {
return defaultInstance;
}
public ReleaseShortCircuitAccessRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ReleaseShortCircuitAccessRequestProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = slotId_.toBuilder();
}
slotId_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(slotId_);
slotId_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
subBuilder = traceInfo_.toBuilder();
}
traceInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(traceInfo_);
traceInfo_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto.Builder.class);
}
public static com.google.protobuf.Parser<ReleaseShortCircuitAccessRequestProto> PARSER =
new com.google.protobuf.AbstractParser<ReleaseShortCircuitAccessRequestProto>() {
public ReleaseShortCircuitAccessRequestProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ReleaseShortCircuitAccessRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<ReleaseShortCircuitAccessRequestProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
public static final int SLOTID_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto slotId_;
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
public boolean hasSlotId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getSlotId() {
return slotId_;
}
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder getSlotIdOrBuilder() {
return slotId_;
}
// optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
public static final int TRACEINFO_FIELD_NUMBER = 2;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto traceInfo_;
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public boolean hasTraceInfo() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getTraceInfo() {
return traceInfo_;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder getTraceInfoOrBuilder() {
return traceInfo_;
}
private void initFields() {
slotId_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance();
traceInfo_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasSlotId()) {
memoizedIsInitialized = 0;
return false;
}
if (!getSlotId().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
if (hasTraceInfo()) {
if (!getTraceInfo().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, slotId_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeMessage(2, traceInfo_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, slotId_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, traceInfo_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto) obj;
boolean result = true;
result = result && (hasSlotId() == other.hasSlotId());
if (hasSlotId()) {
result = result && getSlotId()
.equals(other.getSlotId());
}
result = result && (hasTraceInfo() == other.hasTraceInfo());
if (hasTraceInfo()) {
result = result && getTraceInfo()
.equals(other.getTraceInfo());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasSlotId()) {
hash = (37 * hash) + SLOTID_FIELD_NUMBER;
hash = (53 * hash) + getSlotId().hashCode();
}
if (hasTraceInfo()) {
hash = (37 * hash) + TRACEINFO_FIELD_NUMBER;
hash = (53 * hash) + getTraceInfo().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ReleaseShortCircuitAccessRequestProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getSlotIdFieldBuilder();
getTraceInfoFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (slotIdBuilder_ == null) {
slotId_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance();
} else {
slotIdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
if (traceInfoBuilder_ == null) {
traceInfo_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance();
} else {
traceInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (slotIdBuilder_ == null) {
result.slotId_ = slotId_;
} else {
result.slotId_ = slotIdBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
if (traceInfoBuilder_ == null) {
result.traceInfo_ = traceInfo_;
} else {
result.traceInfo_ = traceInfoBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto.getDefaultInstance()) return this;
if (other.hasSlotId()) {
mergeSlotId(other.getSlotId());
}
if (other.hasTraceInfo()) {
mergeTraceInfo(other.getTraceInfo());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasSlotId()) {
return false;
}
if (!getSlotId().isInitialized()) {
return false;
}
if (hasTraceInfo()) {
if (!getTraceInfo().isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto slotId_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder> slotIdBuilder_;
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
public boolean hasSlotId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getSlotId() {
if (slotIdBuilder_ == null) {
return slotId_;
} else {
return slotIdBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
public Builder setSlotId(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto value) {
if (slotIdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
slotId_ = value;
onChanged();
} else {
slotIdBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
public Builder setSlotId(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder builderForValue) {
if (slotIdBuilder_ == null) {
slotId_ = builderForValue.build();
onChanged();
} else {
slotIdBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
public Builder mergeSlotId(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto value) {
if (slotIdBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
slotId_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance()) {
slotId_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.newBuilder(slotId_).mergeFrom(value).buildPartial();
} else {
slotId_ = value;
}
onChanged();
} else {
slotIdBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
public Builder clearSlotId() {
if (slotIdBuilder_ == null) {
slotId_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance();
onChanged();
} else {
slotIdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder getSlotIdBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getSlotIdFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder getSlotIdOrBuilder() {
if (slotIdBuilder_ != null) {
return slotIdBuilder_.getMessageOrBuilder();
} else {
return slotId_;
}
}
/**
* required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder>
getSlotIdFieldBuilder() {
if (slotIdBuilder_ == null) {
slotIdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder>(
slotId_,
getParentForChildren(),
isClean());
slotId_ = null;
}
return slotIdBuilder_;
}
// optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto traceInfo_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder> traceInfoBuilder_;
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public boolean hasTraceInfo() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getTraceInfo() {
if (traceInfoBuilder_ == null) {
return traceInfo_;
} else {
return traceInfoBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public Builder setTraceInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto value) {
if (traceInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
traceInfo_ = value;
onChanged();
} else {
traceInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public Builder setTraceInfo(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder builderForValue) {
if (traceInfoBuilder_ == null) {
traceInfo_ = builderForValue.build();
onChanged();
} else {
traceInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
return this;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public Builder mergeTraceInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto value) {
if (traceInfoBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002) &&
traceInfo_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance()) {
traceInfo_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.newBuilder(traceInfo_).mergeFrom(value).buildPartial();
} else {
traceInfo_ = value;
}
onChanged();
} else {
traceInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public Builder clearTraceInfo() {
if (traceInfoBuilder_ == null) {
traceInfo_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance();
onChanged();
} else {
traceInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder getTraceInfoBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getTraceInfoFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder getTraceInfoOrBuilder() {
if (traceInfoBuilder_ != null) {
return traceInfoBuilder_.getMessageOrBuilder();
} else {
return traceInfo_;
}
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder>
getTraceInfoFieldBuilder() {
if (traceInfoBuilder_ == null) {
traceInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder>(
traceInfo_,
getParentForChildren(),
isClean());
traceInfo_ = null;
}
return traceInfoBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ReleaseShortCircuitAccessRequestProto)
}
static {
defaultInstance = new ReleaseShortCircuitAccessRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ReleaseShortCircuitAccessRequestProto)
}
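// Usage sketch (added for illustration; `slot` is an assumed
// ShortCircuitShmSlotProto naming the slot being released): slotId is
// required and traceInfo is optional, so the minimal valid request is
//
//   ReleaseShortCircuitAccessRequestProto release =
//       ReleaseShortCircuitAccessRequestProto.newBuilder()
//           .setSlotId(slot)
//           .build();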
public interface ReleaseShortCircuitAccessResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.Status status = 1;
/**
* required .hadoop.hdfs.Status status = 1;
*/
boolean hasStatus();
/**
* required .hadoop.hdfs.Status status = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus();
// optional string error = 2;
/**
* optional string error = 2;
*/
boolean hasError();
/**
* optional string error = 2;
*/
java.lang.String getError();
/**
* optional string error = 2;
*/
com.google.protobuf.ByteString
getErrorBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.ReleaseShortCircuitAccessResponseProto}
*/
public static final class ReleaseShortCircuitAccessResponseProto extends
com.google.protobuf.GeneratedMessage
implements ReleaseShortCircuitAccessResponseProtoOrBuilder {
// Use ReleaseShortCircuitAccessResponseProto.newBuilder() to construct.
private ReleaseShortCircuitAccessResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ReleaseShortCircuitAccessResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ReleaseShortCircuitAccessResponseProto defaultInstance;
public static ReleaseShortCircuitAccessResponseProto getDefaultInstance() {
return defaultInstance;
}
public ReleaseShortCircuitAccessResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ReleaseShortCircuitAccessResponseProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
status_ = value;
}
break;
}
case 18: {
bitField0_ |= 0x00000002;
error_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto.Builder.class);
}
public static com.google.protobuf.Parser<ReleaseShortCircuitAccessResponseProto> PARSER =
new com.google.protobuf.AbstractParser<ReleaseShortCircuitAccessResponseProto>() {
public ReleaseShortCircuitAccessResponseProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ReleaseShortCircuitAccessResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<ReleaseShortCircuitAccessResponseProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.Status status = 1;
public static final int STATUS_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status status_;
/**
* required .hadoop.hdfs.Status status = 1;
*/
public boolean hasStatus() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
return status_;
}
// optional string error = 2;
public static final int ERROR_FIELD_NUMBER = 2;
private java.lang.Object error_;
/**
* optional string error = 2;
*/
public boolean hasError() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional string error = 2;
*/
public java.lang.String getError() {
java.lang.Object ref = error_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
error_ = s;
}
return s;
}
}
/**
* optional string error = 2;
*/
public com.google.protobuf.ByteString
getErrorBytes() {
java.lang.Object ref = error_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
error_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
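// Note (added, not generated): getError() and getErrorBytes() implement the
// standard lazy UTF-8 caching pattern. error_ holds whichever form was seen
// last; each accessor converts on demand, and getError() replaces error_
// with the decoded String when the bytes are valid UTF-8 so later calls
// avoid re-decoding.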
private void initFields() {
status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
error_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasStatus()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, status_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getErrorBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, status_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getErrorBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto) obj;
boolean result = true;
result = result && (hasStatus() == other.hasStatus());
if (hasStatus()) {
result = result &&
(getStatus() == other.getStatus());
}
result = result && (hasError() == other.hasError());
if (hasError()) {
result = result && getError()
.equals(other.getError());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasStatus()) {
hash = (37 * hash) + STATUS_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getStatus());
}
if (hasError()) {
hash = (37 * hash) + ERROR_FIELD_NUMBER;
hash = (53 * hash) + getError().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ReleaseShortCircuitAccessResponseProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
bitField0_ = (bitField0_ & ~0x00000001);
error_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.status_ = status_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.error_ = error_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto.getDefaultInstance()) return this;
if (other.hasStatus()) {
setStatus(other.getStatus());
}
if (other.hasError()) {
bitField0_ |= 0x00000002;
error_ = other.error_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasStatus()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.Status status = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
/**
* required .hadoop.hdfs.Status status = 1;
*/
public boolean hasStatus() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
return status_;
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
status_ = value;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public Builder clearStatus() {
bitField0_ = (bitField0_ & ~0x00000001);
status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
onChanged();
return this;
}
// optional string error = 2;
private java.lang.Object error_ = "";
/**
* optional string error = 2;
*/
public boolean hasError() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional string error = 2;
*/
public java.lang.String getError() {
java.lang.Object ref = error_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
error_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string error = 2;
*/
public com.google.protobuf.ByteString
getErrorBytes() {
java.lang.Object ref = error_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
error_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* optional string error = 2;
*/
public Builder setError(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
error_ = value;
onChanged();
return this;
}
/**
* optional string error = 2;
*/
public Builder clearError() {
bitField0_ = (bitField0_ & ~0x00000002);
error_ = getDefaultInstance().getError();
onChanged();
return this;
}
/**
* optional string error = 2;
*/
public Builder setErrorBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
error_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ReleaseShortCircuitAccessResponseProto)
}
static {
defaultInstance = new ReleaseShortCircuitAccessResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ReleaseShortCircuitAccessResponseProto)
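/*
* Editorial usage sketch, not emitted by protoc: round-tripping this message
* through its generated builder and parser. Every call below is a generated
* method visible in this class; the variable names are illustrative.
*
*   ReleaseShortCircuitAccessResponseProto resp =
*       ReleaseShortCircuitAccessResponseProto.newBuilder()
*           .setStatus(Status.SUCCESS) // required; build() throws if unset
*           .build();
*   byte[] wire = resp.toByteArray();
*   ReleaseShortCircuitAccessResponseProto parsed =
*       ReleaseShortCircuitAccessResponseProto.parseFrom(wire);
*/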
}
public interface ShortCircuitShmRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required string clientName = 1;
/**
* required string clientName = 1;
*
*
* The name of the client requesting the shared memory segment. This is
* purely for logging / debugging purposes.
*
*/
boolean hasClientName();
/**
* required string clientName = 1;
*
*
* The name of the client requesting the shared memory segment. This is
* purely for logging / debugging purposes.
*
*/
java.lang.String getClientName();
/**
* required string clientName = 1;
*
*
* The name of the client requesting the shared memory segment. This is
* purely for logging / debugging purposes.
*
*/
com.google.protobuf.ByteString
getClientNameBytes();
// optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
boolean hasTraceInfo();
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getTraceInfo();
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder getTraceInfoOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.ShortCircuitShmRequestProto}
*/
public static final class ShortCircuitShmRequestProto extends
com.google.protobuf.GeneratedMessage
implements ShortCircuitShmRequestProtoOrBuilder {
// Use ShortCircuitShmRequestProto.newBuilder() to construct.
private ShortCircuitShmRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ShortCircuitShmRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ShortCircuitShmRequestProto defaultInstance;
public static ShortCircuitShmRequestProto getDefaultInstance() {
return defaultInstance;
}
public ShortCircuitShmRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ShortCircuitShmRequestProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
clientName_ = input.readBytes();
break;
}
case 18: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
subBuilder = traceInfo_.toBuilder();
}
traceInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(traceInfo_);
traceInfo_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
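// Editorial note: the case labels in the switch above are protobuf wire
// tags, tag = (field_number << 3) | wire_type. So 10 is field 1 (clientName)
// and 18 is field 2 (traceInfo), both length-delimited (wire type 2);
// tag 0 marks end of input.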
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto.Builder.class);
}
public static com.google.protobuf.Parser<ShortCircuitShmRequestProto> PARSER =
new com.google.protobuf.AbstractParser<ShortCircuitShmRequestProto>() {
public ShortCircuitShmRequestProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ShortCircuitShmRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<ShortCircuitShmRequestProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required string clientName = 1;
public static final int CLIENTNAME_FIELD_NUMBER = 1;
private java.lang.Object clientName_;
/**
* required string clientName = 1;
*
*
* The name of the client requesting the shared memory segment. This is
* purely for logging / debugging purposes.
*
*/
public boolean hasClientName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string clientName = 1;
*
*
* The name of the client requesting the shared memory segment. This is
* purely for logging / debugging purposes.
*
*/
public java.lang.String getClientName() {
java.lang.Object ref = clientName_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
clientName_ = s;
}
return s;
}
}
/**
* required string clientName = 1;
*
*
* The name of the client requesting the shared memory segment. This is
* purely for logging / debugging purposes.
*
*/
public com.google.protobuf.ByteString
getClientNameBytes() {
java.lang.Object ref = clientName_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
clientName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
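// Editorial note: clientName_ holds either a java.lang.String or a
// com.google.protobuf.ByteString. The two accessors above convert lazily and
// cache the converted form; the String form is cached only when the bytes
// are valid UTF-8.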
// optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
public static final int TRACEINFO_FIELD_NUMBER = 2;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto traceInfo_;
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public boolean hasTraceInfo() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getTraceInfo() {
return traceInfo_;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder getTraceInfoOrBuilder() {
return traceInfo_;
}
private void initFields() {
clientName_ = "";
traceInfo_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasClientName()) {
memoizedIsInitialized = 0;
return false;
}
if (hasTraceInfo()) {
if (!getTraceInfo().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getClientNameBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeMessage(2, traceInfo_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getClientNameBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, traceInfo_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto) obj;
boolean result = true;
result = result && (hasClientName() == other.hasClientName());
if (hasClientName()) {
result = result && getClientName()
.equals(other.getClientName());
}
result = result && (hasTraceInfo() == other.hasTraceInfo());
if (hasTraceInfo()) {
result = result && getTraceInfo()
.equals(other.getTraceInfo());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasClientName()) {
hash = (37 * hash) + CLIENTNAME_FIELD_NUMBER;
hash = (53 * hash) + getClientName().hashCode();
}
if (hasTraceInfo()) {
hash = (37 * hash) + TRACEINFO_FIELD_NUMBER;
hash = (53 * hash) + getTraceInfo().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ShortCircuitShmRequestProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getTraceInfoFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
clientName_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
if (traceInfoBuilder_ == null) {
traceInfo_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance();
} else {
traceInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.clientName_ = clientName_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
if (traceInfoBuilder_ == null) {
result.traceInfo_ = traceInfo_;
} else {
result.traceInfo_ = traceInfoBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto.getDefaultInstance()) return this;
if (other.hasClientName()) {
bitField0_ |= 0x00000001;
clientName_ = other.clientName_;
onChanged();
}
if (other.hasTraceInfo()) {
mergeTraceInfo(other.getTraceInfo());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasClientName()) {
return false;
}
if (hasTraceInfo()) {
if (!getTraceInfo().isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string clientName = 1;
private java.lang.Object clientName_ = "";
/**
* required string clientName = 1;
*
*
* The name of the client requesting the shared memory segment. This is
* purely for logging / debugging purposes.
*
*/
public boolean hasClientName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string clientName = 1;
*
*
* The name of the client requesting the shared memory segment. This is
* purely for logging / debugging purposes.
*
*/
public java.lang.String getClientName() {
java.lang.Object ref = clientName_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
clientName_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string clientName = 1;
*
*
* The name of the client requesting the shared memory segment. This is
* purely for logging / debugging purposes.
*
*/
public com.google.protobuf.ByteString
getClientNameBytes() {
java.lang.Object ref = clientName_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
clientName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string clientName = 1;
*
*
* The name of the client requesting the shared memory segment. This is
* purely for logging / debugging purposes.
*
*/
public Builder setClientName(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
clientName_ = value;
onChanged();
return this;
}
/**
* required string clientName = 1;
*
*
* The name of the client requesting the shared memory segment. This is
* purely for logging / debugging purposes.
*
*/
public Builder clearClientName() {
bitField0_ = (bitField0_ & ~0x00000001);
clientName_ = getDefaultInstance().getClientName();
onChanged();
return this;
}
/**
* required string clientName = 1;
*
*
* The name of the client requesting the shared memory segment. This is
* purely for logging / debugging purposes.
*
*/
public Builder setClientNameBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
clientName_ = value;
onChanged();
return this;
}
// optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto traceInfo_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder> traceInfoBuilder_;
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public boolean hasTraceInfo() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getTraceInfo() {
if (traceInfoBuilder_ == null) {
return traceInfo_;
} else {
return traceInfoBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public Builder setTraceInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto value) {
if (traceInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
traceInfo_ = value;
onChanged();
} else {
traceInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public Builder setTraceInfo(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder builderForValue) {
if (traceInfoBuilder_ == null) {
traceInfo_ = builderForValue.build();
onChanged();
} else {
traceInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
return this;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public Builder mergeTraceInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto value) {
if (traceInfoBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002) &&
traceInfo_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance()) {
traceInfo_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.newBuilder(traceInfo_).mergeFrom(value).buildPartial();
} else {
traceInfo_ = value;
}
onChanged();
} else {
traceInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public Builder clearTraceInfo() {
if (traceInfoBuilder_ == null) {
traceInfo_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance();
onChanged();
} else {
traceInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder getTraceInfoBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getTraceInfoFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder getTraceInfoOrBuilder() {
if (traceInfoBuilder_ != null) {
return traceInfoBuilder_.getMessageOrBuilder();
} else {
return traceInfo_;
}
}
/**
* optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder>
getTraceInfoFieldBuilder() {
if (traceInfoBuilder_ == null) {
traceInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder>(
traceInfo_,
getParentForChildren(),
isClean());
traceInfo_ = null;
}
return traceInfoBuilder_;
}
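// Editorial note: once getTraceInfoFieldBuilder() runs, the SingleFieldBuilder
// owns the field and traceInfo_ is nulled out; every traceInfo accessor above
// therefore branches on whether traceInfoBuilder_ is null.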
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ShortCircuitShmRequestProto)
}
static {
defaultInstance = new ShortCircuitShmRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ShortCircuitShmRequestProto)
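/*
* Editorial usage sketch, not emitted by protoc: clientName is the only
* required field, so a minimal request can be built as below. The client
* name shown is a made-up example value.
*
*   ShortCircuitShmRequestProto req =
*       ShortCircuitShmRequestProto.newBuilder()
*           .setClientName("DFSClient_example")
*           .build();
*   // The optional traceInfo field, when used, is attached with
*   // setTraceInfo(...) before build().
*/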
}
public interface ShortCircuitShmResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.Status status = 1;
/**
* required .hadoop.hdfs.Status status = 1;
*/
boolean hasStatus();
/**
* required .hadoop.hdfs.Status status = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus();
// optional string error = 2;
/**
* optional string error = 2;
*/
boolean hasError();
/**
* optional string error = 2;
*/
java.lang.String getError();
/**
* optional string error = 2;
*/
com.google.protobuf.ByteString
getErrorBytes();
// optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
boolean hasId();
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getId();
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder getIdOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.ShortCircuitShmResponseProto}
*/
public static final class ShortCircuitShmResponseProto extends
com.google.protobuf.GeneratedMessage
implements ShortCircuitShmResponseProtoOrBuilder {
// Use ShortCircuitShmResponseProto.newBuilder() to construct.
private ShortCircuitShmResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ShortCircuitShmResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ShortCircuitShmResponseProto defaultInstance;
public static ShortCircuitShmResponseProto getDefaultInstance() {
return defaultInstance;
}
public ShortCircuitShmResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ShortCircuitShmResponseProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
status_ = value;
}
break;
}
case 18: {
bitField0_ |= 0x00000002;
error_ = input.readBytes();
break;
}
case 26: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) == 0x00000004)) {
subBuilder = id_.toBuilder();
}
id_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(id_);
id_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto.Builder.class);
}
public static com.google.protobuf.Parser<ShortCircuitShmResponseProto> PARSER =
new com.google.protobuf.AbstractParser<ShortCircuitShmResponseProto>() {
public ShortCircuitShmResponseProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ShortCircuitShmResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<ShortCircuitShmResponseProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.Status status = 1;
public static final int STATUS_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status status_;
/**
* required .hadoop.hdfs.Status status = 1;
*/
public boolean hasStatus() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
return status_;
}
// optional string error = 2;
public static final int ERROR_FIELD_NUMBER = 2;
private java.lang.Object error_;
/**
* optional string error = 2;
*/
public boolean hasError() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional string error = 2;
*/
public java.lang.String getError() {
java.lang.Object ref = error_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
error_ = s;
}
return s;
}
}
/**
* optional string error = 2;
*/
public com.google.protobuf.ByteString
getErrorBytes() {
java.lang.Object ref = error_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
error_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
public static final int ID_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto id_;
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
public boolean hasId() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getId() {
return id_;
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder getIdOrBuilder() {
return id_;
}
private void initFields() {
status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
error_ = "";
id_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasStatus()) {
memoizedIsInitialized = 0;
return false;
}
if (hasId()) {
if (!getId().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, status_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getErrorBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeMessage(3, id_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, status_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getErrorBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, id_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto) obj;
boolean result = true;
result = result && (hasStatus() == other.hasStatus());
if (hasStatus()) {
result = result &&
(getStatus() == other.getStatus());
}
result = result && (hasError() == other.hasError());
if (hasError()) {
result = result && getError()
.equals(other.getError());
}
result = result && (hasId() == other.hasId());
if (hasId()) {
result = result && getId()
.equals(other.getId());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasStatus()) {
hash = (37 * hash) + STATUS_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getStatus());
}
if (hasError()) {
hash = (37 * hash) + ERROR_FIELD_NUMBER;
hash = (53 * hash) + getError().hashCode();
}
if (hasId()) {
hash = (37 * hash) + ID_FIELD_NUMBER;
hash = (53 * hash) + getId().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ShortCircuitShmResponseProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getIdFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
bitField0_ = (bitField0_ & ~0x00000001);
error_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
if (idBuilder_ == null) {
id_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance();
} else {
idBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.status_ = status_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.error_ = error_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
if (idBuilder_ == null) {
result.id_ = id_;
} else {
result.id_ = idBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto.getDefaultInstance()) return this;
if (other.hasStatus()) {
setStatus(other.getStatus());
}
if (other.hasError()) {
bitField0_ |= 0x00000002;
error_ = other.error_;
onChanged();
}
if (other.hasId()) {
mergeId(other.getId());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasStatus()) {
return false;
}
if (hasId()) {
if (!getId().isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.Status status = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
/**
* required .hadoop.hdfs.Status status = 1;
*/
public boolean hasStatus() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
return status_;
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
status_ = value;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public Builder clearStatus() {
bitField0_ = (bitField0_ & ~0x00000001);
status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
onChanged();
return this;
}
// optional string error = 2;
private java.lang.Object error_ = "";
/**
* optional string error = 2;
*/
public boolean hasError() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional string error = 2;
*/
public java.lang.String getError() {
java.lang.Object ref = error_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
error_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string error = 2;
*/
public com.google.protobuf.ByteString
getErrorBytes() {
java.lang.Object ref = error_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
error_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* optional string error = 2;
*/
public Builder setError(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
error_ = value;
onChanged();
return this;
}
/**
* optional string error = 2;
*/
public Builder clearError() {
bitField0_ = (bitField0_ & ~0x00000002);
error_ = getDefaultInstance().getError();
onChanged();
return this;
}
/**
* optional string error = 2;
*/
public Builder setErrorBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
error_ = value;
onChanged();
return this;
}
// optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto id_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder> idBuilder_;
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
public boolean hasId() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getId() {
if (idBuilder_ == null) {
return id_;
} else {
return idBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
public Builder setId(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto value) {
if (idBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
id_ = value;
onChanged();
} else {
idBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
public Builder setId(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder builderForValue) {
if (idBuilder_ == null) {
id_ = builderForValue.build();
onChanged();
} else {
idBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
public Builder mergeId(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto value) {
if (idBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004) &&
id_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance()) {
id_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.newBuilder(id_).mergeFrom(value).buildPartial();
} else {
id_ = value;
}
onChanged();
} else {
idBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
public Builder clearId() {
if (idBuilder_ == null) {
id_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance();
onChanged();
} else {
idBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder getIdBuilder() {
bitField0_ |= 0x00000004;
onChanged();
return getIdFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder getIdOrBuilder() {
if (idBuilder_ != null) {
return idBuilder_.getMessageOrBuilder();
} else {
return id_;
}
}
/**
* optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder>
getIdFieldBuilder() {
if (idBuilder_ == null) {
idBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder>(
id_,
getParentForChildren(),
isClean());
id_ = null;
}
return idBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ShortCircuitShmResponseProto)
}
static {
defaultInstance = new ShortCircuitShmResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ShortCircuitShmResponseProto)
}
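// --------------------------------------------------------------------------
// Illustrative sketch, NOT part of the protoc output: how a client might
// unpack a ShortCircuitShmResponseProto built by the code above. Only the
// generated accessors (getStatus, hasError, getError, hasId, getId) come from
// this file; the helper name and the IOException policy are assumptions made
// for the example.
// --------------------------------------------------------------------------
private static ShortCircuitShmIdProto exampleUnpackShmResponse(
    ShortCircuitShmResponseProto resp) throws java.io.IOException {
  if (resp.getStatus() != Status.SUCCESS) {
    // error is an optional string; fall back to the numeric status when unset.
    throw new java.io.IOException(resp.hasError()
        ? resp.getError() : "shm slot request failed: " + resp.getStatus());
  }
  if (!resp.hasId()) {
    throw new java.io.IOException("SUCCESS response carried no shm id");
  }
  return resp.getId();
}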
public interface PacketHeaderProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required sfixed64 offsetInBlock = 1;
/**
* required sfixed64 offsetInBlock = 1;
*
*
* All fields must be fixed-length!
*
*/
boolean hasOffsetInBlock();
/**
* required sfixed64 offsetInBlock = 1;
*
*
* All fields must be fixed-length!
*
*/
long getOffsetInBlock();
// required sfixed64 seqno = 2;
/**
* required sfixed64 seqno = 2;
*/
boolean hasSeqno();
/**
* required sfixed64 seqno = 2;
*/
long getSeqno();
// required bool lastPacketInBlock = 3;
/**
* required bool lastPacketInBlock = 3;
*/
boolean hasLastPacketInBlock();
/**
* required bool lastPacketInBlock = 3;
*/
boolean getLastPacketInBlock();
// required sfixed32 dataLen = 4;
/**
* required sfixed32 dataLen = 4;
*/
boolean hasDataLen();
/**
* required sfixed32 dataLen = 4;
*/
int getDataLen();
// optional bool syncBlock = 5 [default = false];
/**
* optional bool syncBlock = 5 [default = false];
*/
boolean hasSyncBlock();
/**
* optional bool syncBlock = 5 [default = false];
*/
boolean getSyncBlock();
}
/**
* Protobuf type {@code hadoop.hdfs.PacketHeaderProto}
*/
public static final class PacketHeaderProto extends
com.google.protobuf.GeneratedMessage
implements PacketHeaderProtoOrBuilder {
// Use PacketHeaderProto.newBuilder() to construct.
private PacketHeaderProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private PacketHeaderProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final PacketHeaderProto defaultInstance;
public static PacketHeaderProto getDefaultInstance() {
return defaultInstance;
}
public PacketHeaderProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private PacketHeaderProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 9: {
bitField0_ |= 0x00000001;
offsetInBlock_ = input.readSFixed64();
break;
}
case 17: {
bitField0_ |= 0x00000002;
seqno_ = input.readSFixed64();
break;
}
case 24: {
bitField0_ |= 0x00000004;
lastPacketInBlock_ = input.readBool();
break;
}
case 37: {
bitField0_ |= 0x00000008;
dataLen_ = input.readSFixed32();
break;
}
case 40: {
bitField0_ |= 0x00000010;
syncBlock_ = input.readBool();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PacketHeaderProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PacketHeaderProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.Builder.class);
}
public static com.google.protobuf.Parser<PacketHeaderProto> PARSER =
new com.google.protobuf.AbstractParser<PacketHeaderProto>() {
public PacketHeaderProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new PacketHeaderProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<PacketHeaderProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required sfixed64 offsetInBlock = 1;
public static final int OFFSETINBLOCK_FIELD_NUMBER = 1;
private long offsetInBlock_;
/**
* required sfixed64 offsetInBlock = 1;
*
*
* All fields must be fixed-length!
*
*/
public boolean hasOffsetInBlock() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required sfixed64 offsetInBlock = 1;
*
*
* All fields must be fixed-length!
*
*/
public long getOffsetInBlock() {
return offsetInBlock_;
}
// required sfixed64 seqno = 2;
public static final int SEQNO_FIELD_NUMBER = 2;
private long seqno_;
/**
* required sfixed64 seqno = 2;
*/
public boolean hasSeqno() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required sfixed64 seqno = 2;
*/
public long getSeqno() {
return seqno_;
}
// required bool lastPacketInBlock = 3;
public static final int LASTPACKETINBLOCK_FIELD_NUMBER = 3;
private boolean lastPacketInBlock_;
/**
* required bool lastPacketInBlock = 3;
*/
public boolean hasLastPacketInBlock() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required bool lastPacketInBlock = 3;
*/
public boolean getLastPacketInBlock() {
return lastPacketInBlock_;
}
// required sfixed32 dataLen = 4;
public static final int DATALEN_FIELD_NUMBER = 4;
private int dataLen_;
/**
* required sfixed32 dataLen = 4;
*/
public boolean hasDataLen() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required sfixed32 dataLen = 4;
*/
public int getDataLen() {
return dataLen_;
}
// optional bool syncBlock = 5 [default = false];
public static final int SYNCBLOCK_FIELD_NUMBER = 5;
private boolean syncBlock_;
/**
* optional bool syncBlock = 5 [default = false];
*/
public boolean hasSyncBlock() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional bool syncBlock = 5 [default = false];
*/
public boolean getSyncBlock() {
return syncBlock_;
}
private void initFields() {
offsetInBlock_ = 0L;
seqno_ = 0L;
lastPacketInBlock_ = false;
dataLen_ = 0;
syncBlock_ = false;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasOffsetInBlock()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasSeqno()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasLastPacketInBlock()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasDataLen()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeSFixed64(1, offsetInBlock_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeSFixed64(2, seqno_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBool(3, lastPacketInBlock_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeSFixed32(4, dataLen_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeBool(5, syncBlock_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeSFixed64Size(1, offsetInBlock_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeSFixed64Size(2, seqno_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(3, lastPacketInBlock_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeSFixed32Size(4, dataLen_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(5, syncBlock_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto) obj;
boolean result = true;
result = result && (hasOffsetInBlock() == other.hasOffsetInBlock());
if (hasOffsetInBlock()) {
result = result && (getOffsetInBlock()
== other.getOffsetInBlock());
}
result = result && (hasSeqno() == other.hasSeqno());
if (hasSeqno()) {
result = result && (getSeqno()
== other.getSeqno());
}
result = result && (hasLastPacketInBlock() == other.hasLastPacketInBlock());
if (hasLastPacketInBlock()) {
result = result && (getLastPacketInBlock()
== other.getLastPacketInBlock());
}
result = result && (hasDataLen() == other.hasDataLen());
if (hasDataLen()) {
result = result && (getDataLen()
== other.getDataLen());
}
result = result && (hasSyncBlock() == other.hasSyncBlock());
if (hasSyncBlock()) {
result = result && (getSyncBlock()
== other.getSyncBlock());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasOffsetInBlock()) {
hash = (37 * hash) + OFFSETINBLOCK_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getOffsetInBlock());
}
if (hasSeqno()) {
hash = (37 * hash) + SEQNO_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getSeqno());
}
if (hasLastPacketInBlock()) {
hash = (37 * hash) + LASTPACKETINBLOCK_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getLastPacketInBlock());
}
if (hasDataLen()) {
hash = (37 * hash) + DATALEN_FIELD_NUMBER;
hash = (53 * hash) + getDataLen();
}
if (hasSyncBlock()) {
hash = (37 * hash) + SYNCBLOCK_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getSyncBlock());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.PacketHeaderProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PacketHeaderProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PacketHeaderProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
offsetInBlock_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
seqno_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
lastPacketInBlock_ = false;
bitField0_ = (bitField0_ & ~0x00000004);
dataLen_ = 0;
bitField0_ = (bitField0_ & ~0x00000008);
syncBlock_ = false;
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PacketHeaderProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.offsetInBlock_ = offsetInBlock_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.seqno_ = seqno_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.lastPacketInBlock_ = lastPacketInBlock_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.dataLen_ = dataLen_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.syncBlock_ = syncBlock_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.getDefaultInstance()) return this;
if (other.hasOffsetInBlock()) {
setOffsetInBlock(other.getOffsetInBlock());
}
if (other.hasSeqno()) {
setSeqno(other.getSeqno());
}
if (other.hasLastPacketInBlock()) {
setLastPacketInBlock(other.getLastPacketInBlock());
}
if (other.hasDataLen()) {
setDataLen(other.getDataLen());
}
if (other.hasSyncBlock()) {
setSyncBlock(other.getSyncBlock());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasOffsetInBlock()) {
return false;
}
if (!hasSeqno()) {
return false;
}
if (!hasLastPacketInBlock()) {
return false;
}
if (!hasDataLen()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required sfixed64 offsetInBlock = 1;
private long offsetInBlock_ ;
/**
* required sfixed64 offsetInBlock = 1;
*
*
* All fields must be fixed-length!
*
*/
public boolean hasOffsetInBlock() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required sfixed64 offsetInBlock = 1;
*
*
* All fields must be fixed-length!
*
*/
public long getOffsetInBlock() {
return offsetInBlock_;
}
/**
* required sfixed64 offsetInBlock = 1;
*
*
* All fields must be fixed-length!
*
*/
public Builder setOffsetInBlock(long value) {
bitField0_ |= 0x00000001;
offsetInBlock_ = value;
onChanged();
return this;
}
/**
* required sfixed64 offsetInBlock = 1;
*
*
* All fields must be fixed-length!
*
*/
public Builder clearOffsetInBlock() {
bitField0_ = (bitField0_ & ~0x00000001);
offsetInBlock_ = 0L;
onChanged();
return this;
}
// required sfixed64 seqno = 2;
private long seqno_ ;
/**
* required sfixed64 seqno = 2;
*/
public boolean hasSeqno() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required sfixed64 seqno = 2;
*/
public long getSeqno() {
return seqno_;
}
/**
* required sfixed64 seqno = 2;
*/
public Builder setSeqno(long value) {
bitField0_ |= 0x00000002;
seqno_ = value;
onChanged();
return this;
}
/**
* required sfixed64 seqno = 2;
*/
public Builder clearSeqno() {
bitField0_ = (bitField0_ & ~0x00000002);
seqno_ = 0L;
onChanged();
return this;
}
// required bool lastPacketInBlock = 3;
private boolean lastPacketInBlock_ ;
/**
* required bool lastPacketInBlock = 3;
*/
public boolean hasLastPacketInBlock() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required bool lastPacketInBlock = 3;
*/
public boolean getLastPacketInBlock() {
return lastPacketInBlock_;
}
/**
* required bool lastPacketInBlock = 3;
*/
public Builder setLastPacketInBlock(boolean value) {
bitField0_ |= 0x00000004;
lastPacketInBlock_ = value;
onChanged();
return this;
}
/**
* required bool lastPacketInBlock = 3;
*/
public Builder clearLastPacketInBlock() {
bitField0_ = (bitField0_ & ~0x00000004);
lastPacketInBlock_ = false;
onChanged();
return this;
}
// required sfixed32 dataLen = 4;
private int dataLen_ ;
/**
* required sfixed32 dataLen = 4;
*/
public boolean hasDataLen() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* required sfixed32 dataLen = 4;
*/
public int getDataLen() {
return dataLen_;
}
/**
* required sfixed32 dataLen = 4;
*/
public Builder setDataLen(int value) {
bitField0_ |= 0x00000008;
dataLen_ = value;
onChanged();
return this;
}
/**
* required sfixed32 dataLen = 4;
*/
public Builder clearDataLen() {
bitField0_ = (bitField0_ & ~0x00000008);
dataLen_ = 0;
onChanged();
return this;
}
// optional bool syncBlock = 5 [default = false];
private boolean syncBlock_ ;
/**
* optional bool syncBlock = 5 [default = false];
*/
public boolean hasSyncBlock() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional bool syncBlock = 5 [default = false];
*/
public boolean getSyncBlock() {
return syncBlock_;
}
/**
* optional bool syncBlock = 5 [default = false];
*/
public Builder setSyncBlock(boolean value) {
bitField0_ |= 0x00000010;
syncBlock_ = value;
onChanged();
return this;
}
/**
* optional bool syncBlock = 5 [default = false];
*/
public Builder clearSyncBlock() {
bitField0_ = (bitField0_ & ~0x00000010);
syncBlock_ = false;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.PacketHeaderProto)
}
static {
defaultInstance = new PacketHeaderProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.PacketHeaderProto)
}
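// --------------------------------------------------------------------------
// Illustrative sketch, NOT part of the protoc output: building and
// serializing a PacketHeaderProto. build() enforces the four required fields
// via isInitialized(), and because every numeric field is sfixed64/sfixed32
// (see the "All fields must be fixed-length!" comment above), a header with
// syncBlock set always serializes to the same number of bytes. The helper
// name and field values are arbitrary example data.
// --------------------------------------------------------------------------
private static byte[] examplePacketHeaderBytes() {
  PacketHeaderProto header = PacketHeaderProto.newBuilder()
      .setOffsetInBlock(0L)           // required sfixed64
      .setSeqno(1L)                   // required sfixed64
      .setLastPacketInBlock(false)    // required bool
      .setDataLen(64 * 1024)          // required sfixed32
      .setSyncBlock(false)            // optional bool, default false
      .build();                       // throws if a required field is unset
  return header.toByteArray();
}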
public interface PipelineAckProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required sint64 seqno = 1;
/**
* required sint64 seqno = 1;
*/
boolean hasSeqno();
/**
* required sint64 seqno = 1;
*/
long getSeqno();
// repeated .hadoop.hdfs.Status reply = 2;
/**
* repeated .hadoop.hdfs.Status reply = 2;
*/
java.util.List<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> getReplyList();
/**
* repeated .hadoop.hdfs.Status reply = 2;
*/
int getReplyCount();
/**
* repeated .hadoop.hdfs.Status reply = 2;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getReply(int index);
// optional uint64 downstreamAckTimeNanos = 3 [default = 0];
/**
* optional uint64 downstreamAckTimeNanos = 3 [default = 0];
*/
boolean hasDownstreamAckTimeNanos();
/**
* optional uint64 downstreamAckTimeNanos = 3 [default = 0];
*/
long getDownstreamAckTimeNanos();
// repeated uint32 flag = 4 [packed = true];
/**
* repeated uint32 flag = 4 [packed = true];
*/
java.util.List<java.lang.Integer> getFlagList();
/**
* repeated uint32 flag = 4 [packed = true];
*/
int getFlagCount();
/**
* repeated uint32 flag = 4 [packed = true];
*/
int getFlag(int index);
}
/**
* Protobuf type {@code hadoop.hdfs.PipelineAckProto}
*/
public static final class PipelineAckProto extends
com.google.protobuf.GeneratedMessage
implements PipelineAckProtoOrBuilder {
// Use PipelineAckProto.newBuilder() to construct.
private PipelineAckProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private PipelineAckProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final PipelineAckProto defaultInstance;
public static PipelineAckProto getDefaultInstance() {
return defaultInstance;
}
public PipelineAckProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private PipelineAckProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
seqno_ = input.readSInt64();
break;
}
case 16: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(2, rawValue);
} else {
if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
reply_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status>();
mutable_bitField0_ |= 0x00000002;
}
reply_.add(value);
}
break;
}
case 18: {
int length = input.readRawVarint32();
int oldLimit = input.pushLimit(length);
while(input.getBytesUntilLimit() > 0) {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(2, rawValue);
} else {
if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
reply_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status>();
mutable_bitField0_ |= 0x00000002;
}
reply_.add(value);
}
}
input.popLimit(oldLimit);
break;
}
case 24: {
bitField0_ |= 0x00000002;
downstreamAckTimeNanos_ = input.readUInt64();
break;
}
case 32: {
if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
flag_ = new java.util.ArrayList<java.lang.Integer>();
mutable_bitField0_ |= 0x00000008;
}
flag_.add(input.readUInt32());
break;
}
case 34: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
if (!((mutable_bitField0_ & 0x00000008) == 0x00000008) && input.getBytesUntilLimit() > 0) {
flag_ = new java.util.ArrayList<java.lang.Integer>();
mutable_bitField0_ |= 0x00000008;
}
while (input.getBytesUntilLimit() > 0) {
flag_.add(input.readUInt32());
}
input.popLimit(limit);
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
reply_ = java.util.Collections.unmodifiableList(reply_);
}
if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
flag_ = java.util.Collections.unmodifiableList(flag_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PipelineAckProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PipelineAckProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.Builder.class);
}
public static com.google.protobuf.Parser<PipelineAckProto> PARSER =
new com.google.protobuf.AbstractParser<PipelineAckProto>() {
public PipelineAckProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new PipelineAckProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<PipelineAckProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required sint64 seqno = 1;
public static final int SEQNO_FIELD_NUMBER = 1;
private long seqno_;
/**
* required sint64 seqno = 1;
*/
public boolean hasSeqno() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required sint64 seqno = 1;
*/
public long getSeqno() {
return seqno_;
}
// repeated .hadoop.hdfs.Status reply = 2;
public static final int REPLY_FIELD_NUMBER = 2;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> reply_;
/**
* repeated .hadoop.hdfs.Status reply = 2;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> getReplyList() {
return reply_;
}
/**
* repeated .hadoop.hdfs.Status reply = 2;
*/
public int getReplyCount() {
return reply_.size();
}
/**
* repeated .hadoop.hdfs.Status reply = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getReply(int index) {
return reply_.get(index);
}
// optional uint64 downstreamAckTimeNanos = 3 [default = 0];
public static final int DOWNSTREAMACKTIMENANOS_FIELD_NUMBER = 3;
private long downstreamAckTimeNanos_;
/**
* optional uint64 downstreamAckTimeNanos = 3 [default = 0];
*/
public boolean hasDownstreamAckTimeNanos() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional uint64 downstreamAckTimeNanos = 3 [default = 0];
*/
public long getDownstreamAckTimeNanos() {
return downstreamAckTimeNanos_;
}
// repeated uint32 flag = 4 [packed = true];
public static final int FLAG_FIELD_NUMBER = 4;
private java.util.List<java.lang.Integer> flag_;
/**
* repeated uint32 flag = 4 [packed = true];
*/
public java.util.List<java.lang.Integer>
getFlagList() {
return flag_;
}
/**
* repeated uint32 flag = 4 [packed = true];
*/
public int getFlagCount() {
return flag_.size();
}
/**
* repeated uint32 flag = 4 [packed = true];
*/
public int getFlag(int index) {
return flag_.get(index);
}
private int flagMemoizedSerializedSize = -1;
private void initFields() {
seqno_ = 0L;
reply_ = java.util.Collections.emptyList();
downstreamAckTimeNanos_ = 0L;
flag_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasSeqno()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeSInt64(1, seqno_);
}
for (int i = 0; i < reply_.size(); i++) {
output.writeEnum(2, reply_.get(i).getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(3, downstreamAckTimeNanos_);
}
if (getFlagList().size() > 0) {
output.writeRawVarint32(34);
output.writeRawVarint32(flagMemoizedSerializedSize);
}
for (int i = 0; i < flag_.size(); i++) {
output.writeUInt32NoTag(flag_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeSInt64Size(1, seqno_);
}
{
int dataSize = 0;
for (int i = 0; i < reply_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeEnumSizeNoTag(reply_.get(i).getNumber());
}
size += dataSize;
size += 1 * reply_.size();
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, downstreamAckTimeNanos_);
}
{
int dataSize = 0;
for (int i = 0; i < flag_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeUInt32SizeNoTag(flag_.get(i));
}
size += dataSize;
if (!getFlagList().isEmpty()) {
size += 1;
size += com.google.protobuf.CodedOutputStream
.computeInt32SizeNoTag(dataSize);
}
flagMemoizedSerializedSize = dataSize;
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto) obj;
boolean result = true;
result = result && (hasSeqno() == other.hasSeqno());
if (hasSeqno()) {
result = result && (getSeqno()
== other.getSeqno());
}
result = result && getReplyList()
.equals(other.getReplyList());
result = result && (hasDownstreamAckTimeNanos() == other.hasDownstreamAckTimeNanos());
if (hasDownstreamAckTimeNanos()) {
result = result && (getDownstreamAckTimeNanos()
== other.getDownstreamAckTimeNanos());
}
result = result && getFlagList()
.equals(other.getFlagList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasSeqno()) {
hash = (37 * hash) + SEQNO_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getSeqno());
}
if (getReplyCount() > 0) {
hash = (37 * hash) + REPLY_FIELD_NUMBER;
hash = (53 * hash) + hashEnumList(getReplyList());
}
if (hasDownstreamAckTimeNanos()) {
hash = (37 * hash) + DOWNSTREAMACKTIMENANOS_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getDownstreamAckTimeNanos());
}
if (getFlagCount() > 0) {
hash = (37 * hash) + FLAG_FIELD_NUMBER;
hash = (53 * hash) + getFlagList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.PipelineAckProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PipelineAckProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PipelineAckProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
seqno_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
reply_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
downstreamAckTimeNanos_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
flag_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PipelineAckProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.seqno_ = seqno_;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
reply_ = java.util.Collections.unmodifiableList(reply_);
bitField0_ = (bitField0_ & ~0x00000002);
}
result.reply_ = reply_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000002;
}
result.downstreamAckTimeNanos_ = downstreamAckTimeNanos_;
if (((bitField0_ & 0x00000008) == 0x00000008)) {
flag_ = java.util.Collections.unmodifiableList(flag_);
bitField0_ = (bitField0_ & ~0x00000008);
}
result.flag_ = flag_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.getDefaultInstance()) return this;
if (other.hasSeqno()) {
setSeqno(other.getSeqno());
}
if (!other.reply_.isEmpty()) {
if (reply_.isEmpty()) {
reply_ = other.reply_;
bitField0_ = (bitField0_ & ~0x00000002);
} else {
ensureReplyIsMutable();
reply_.addAll(other.reply_);
}
onChanged();
}
if (other.hasDownstreamAckTimeNanos()) {
setDownstreamAckTimeNanos(other.getDownstreamAckTimeNanos());
}
if (!other.flag_.isEmpty()) {
if (flag_.isEmpty()) {
flag_ = other.flag_;
bitField0_ = (bitField0_ & ~0x00000008);
} else {
ensureFlagIsMutable();
flag_.addAll(other.flag_);
}
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasSeqno()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required sint64 seqno = 1;
private long seqno_ ;
/**
* required sint64 seqno = 1;
*/
public boolean hasSeqno() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required sint64 seqno = 1;
*/
public long getSeqno() {
return seqno_;
}
/**
* required sint64 seqno = 1;
*/
public Builder setSeqno(long value) {
bitField0_ |= 0x00000001;
seqno_ = value;
onChanged();
return this;
}
/**
* required sint64 seqno = 1;
*/
public Builder clearSeqno() {
bitField0_ = (bitField0_ & ~0x00000001);
seqno_ = 0L;
onChanged();
return this;
}
// repeated .hadoop.hdfs.Status reply = 2;
private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> reply_ =
java.util.Collections.emptyList();
private void ensureReplyIsMutable() {
if (!((bitField0_ & 0x00000002) == 0x00000002)) {
reply_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status>(reply_);
bitField0_ |= 0x00000002;
}
}
/**
* repeated .hadoop.hdfs.Status reply = 2;
*/
public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> getReplyList() {
return java.util.Collections.unmodifiableList(reply_);
}
/**
* repeated .hadoop.hdfs.Status reply = 2;
*/
public int getReplyCount() {
return reply_.size();
}
/**
* repeated .hadoop.hdfs.Status reply = 2;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getReply(int index) {
return reply_.get(index);
}
/**
* repeated .hadoop.hdfs.Status reply = 2;
*/
public Builder setReply(
int index, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
if (value == null) {
throw new NullPointerException();
}
ensureReplyIsMutable();
reply_.set(index, value);
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.Status reply = 2;
*/
public Builder addReply(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
if (value == null) {
throw new NullPointerException();
}
ensureReplyIsMutable();
reply_.add(value);
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.Status reply = 2;
*/
public Builder addAllReply(
java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> values) {
ensureReplyIsMutable();
super.addAll(values, reply_);
onChanged();
return this;
}
/**
* repeated .hadoop.hdfs.Status reply = 2;
*/
public Builder clearReply() {
reply_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
return this;
}
// optional uint64 downstreamAckTimeNanos = 3 [default = 0];
private long downstreamAckTimeNanos_ ;
/**
* optional uint64 downstreamAckTimeNanos = 3 [default = 0];
*/
public boolean hasDownstreamAckTimeNanos() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional uint64 downstreamAckTimeNanos = 3 [default = 0];
*/
public long getDownstreamAckTimeNanos() {
return downstreamAckTimeNanos_;
}
/**
* optional uint64 downstreamAckTimeNanos = 3 [default = 0];
*/
public Builder setDownstreamAckTimeNanos(long value) {
bitField0_ |= 0x00000004;
downstreamAckTimeNanos_ = value;
onChanged();
return this;
}
/**
* optional uint64 downstreamAckTimeNanos = 3 [default = 0];
*/
public Builder clearDownstreamAckTimeNanos() {
bitField0_ = (bitField0_ & ~0x00000004);
downstreamAckTimeNanos_ = 0L;
onChanged();
return this;
}
// repeated uint32 flag = 4 [packed = true];
private java.util.List<java.lang.Integer> flag_ = java.util.Collections.emptyList();
private void ensureFlagIsMutable() {
if (!((bitField0_ & 0x00000008) == 0x00000008)) {
flag_ = new java.util.ArrayList<java.lang.Integer>(flag_);
bitField0_ |= 0x00000008;
}
}
/**
* repeated uint32 flag = 4 [packed = true];
*/
public java.util.List<java.lang.Integer>
getFlagList() {
return java.util.Collections.unmodifiableList(flag_);
}
/**
* repeated uint32 flag = 4 [packed = true];
*/
public int getFlagCount() {
return flag_.size();
}
/**
* repeated uint32 flag = 4 [packed = true];
*/
public int getFlag(int index) {
return flag_.get(index);
}
/**
* repeated uint32 flag = 4 [packed = true];
*/
public Builder setFlag(
int index, int value) {
ensureFlagIsMutable();
flag_.set(index, value);
onChanged();
return this;
}
/**
* repeated uint32 flag = 4 [packed = true];
*/
public Builder addFlag(int value) {
ensureFlagIsMutable();
flag_.add(value);
onChanged();
return this;
}
/**
* repeated uint32 flag = 4 [packed = true];
*/
public Builder addAllFlag(
java.lang.Iterable<? extends java.lang.Integer> values) {
ensureFlagIsMutable();
super.addAll(values, flag_);
onChanged();
return this;
}
/**
* repeated uint32 flag = 4 [packed = true];
*/
public Builder clearFlag() {
flag_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000008);
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.PipelineAckProto)
}
static {
defaultInstance = new PipelineAckProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.PipelineAckProto)
}
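// --------------------------------------------------------------------------
// Illustrative sketch, NOT part of the protoc output: a serialize/parse round
// trip through PipelineAckProto. Each addReply records one Status per
// datanode in the write pipeline, and flag is declared [packed = true], so
// all flag values share a single length-delimited field on the wire. The
// helper name and values are arbitrary example data.
// --------------------------------------------------------------------------
private static PipelineAckProto examplePipelineAckRoundTrip(long ackedSeqno)
    throws com.google.protobuf.InvalidProtocolBufferException {
  PipelineAckProto ack = PipelineAckProto.newBuilder()
      .setSeqno(ackedSeqno)             // required sint64
      .addReply(Status.SUCCESS)         // status from the first datanode
      .addReply(Status.ERROR_CHECKSUM)  // status from the second datanode
      .setDownstreamAckTimeNanos(0L)    // optional uint64, default 0
      .addFlag(0)                       // repeated uint32, packed
      .build();                         // only seqno is required
  return PipelineAckProto.parseFrom(ack.toByteArray());
}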
public interface ReadOpChecksumInfoProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.ChecksumProto checksum = 1;
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
boolean hasChecksum();
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getChecksum();
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getChecksumOrBuilder();
// required uint64 chunkOffset = 2;
/**
* required uint64 chunkOffset = 2;
*
*
**
* The offset into the block at which the first packet
* will start. This is necessary since reads will align
* backwards to a checksum chunk boundary.
*
*/
boolean hasChunkOffset();
/**
* required uint64 chunkOffset = 2;
*
*
**
* The offset into the block at which the first packet
* will start. This is necessary since reads will align
* backwards to a checksum chunk boundary.
*
*/
long getChunkOffset();
}
/**
* Protobuf type {@code hadoop.hdfs.ReadOpChecksumInfoProto}
*
*
**
* Sent as part of the BlockOpResponseProto
* for READ_BLOCK and COPY_BLOCK operations.
*
*/
public static final class ReadOpChecksumInfoProto extends
com.google.protobuf.GeneratedMessage
implements ReadOpChecksumInfoProtoOrBuilder {
// Use ReadOpChecksumInfoProto.newBuilder() to construct.
private ReadOpChecksumInfoProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ReadOpChecksumInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ReadOpChecksumInfoProto defaultInstance;
public static ReadOpChecksumInfoProto getDefaultInstance() {
return defaultInstance;
}
public ReadOpChecksumInfoProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ReadOpChecksumInfoProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = checksum_.toBuilder();
}
checksum_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(checksum_);
checksum_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 16: {
bitField0_ |= 0x00000002;
chunkOffset_ = input.readUInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder.class);
}
public static com.google.protobuf.Parser<ReadOpChecksumInfoProto> PARSER =
new com.google.protobuf.AbstractParser<ReadOpChecksumInfoProto>() {
public ReadOpChecksumInfoProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ReadOpChecksumInfoProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<ReadOpChecksumInfoProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.ChecksumProto checksum = 1;
public static final int CHECKSUM_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto checksum_;
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
public boolean hasChecksum() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getChecksum() {
return checksum_;
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getChecksumOrBuilder() {
return checksum_;
}
// required uint64 chunkOffset = 2;
public static final int CHUNKOFFSET_FIELD_NUMBER = 2;
private long chunkOffset_;
/**
* required uint64 chunkOffset = 2;
*
*
**
* The offset into the block at which the first packet
* will start. This is necessary since reads will align
* backwards to a checksum chunk boundary.
*
*/
public boolean hasChunkOffset() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 chunkOffset = 2;
*
*
**
* The offset into the block at which the first packet
* will start. This is necessary since reads will align
* backwards to a checksum chunk boundary.
*
*/
public long getChunkOffset() {
return chunkOffset_;
}
private void initFields() {
checksum_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance();
chunkOffset_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasChecksum()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasChunkOffset()) {
memoizedIsInitialized = 0;
return false;
}
if (!getChecksum().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, checksum_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, chunkOffset_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, checksum_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, chunkOffset_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto) obj;
boolean result = true;
result = result && (hasChecksum() == other.hasChecksum());
if (hasChecksum()) {
result = result && getChecksum()
.equals(other.getChecksum());
}
result = result && (hasChunkOffset() == other.hasChunkOffset());
if (hasChunkOffset()) {
result = result && (getChunkOffset()
== other.getChunkOffset());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasChecksum()) {
hash = (37 * hash) + CHECKSUM_FIELD_NUMBER;
hash = (53 * hash) + getChecksum().hashCode();
}
if (hasChunkOffset()) {
hash = (37 * hash) + CHUNKOFFSET_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getChunkOffset());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
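// Hedged sketch of choosing among the overloads above (the stream name
// `in` is hypothetical): parseFrom(InputStream) consumes the stream to
// EOF as a single message, while parseDelimitedFrom first reads a varint
// length prefix, so several messages can share one stream:
//
//   // whole stream is one message:
//   ReadOpChecksumInfoProto msg = ReadOpChecksumInfoProto.parseFrom(in);
//   // or, for a length-prefixed sequence written with writeDelimitedTo:
//   ReadOpChecksumInfoProto first =
//       ReadOpChecksumInfoProto.parseDelimitedFrom(in);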
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ReadOpChecksumInfoProto}
*
* Sent as part of the BlockOpResponseProto
* for READ_BLOCK and COPY_BLOCK operations.
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getChecksumFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (checksumBuilder_ == null) {
checksum_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance();
} else {
checksumBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
chunkOffset_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (checksumBuilder_ == null) {
result.checksum_ = checksum_;
} else {
result.checksum_ = checksumBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.chunkOffset_ = chunkOffset_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance()) return this;
if (other.hasChecksum()) {
mergeChecksum(other.getChecksum());
}
if (other.hasChunkOffset()) {
setChunkOffset(other.getChunkOffset());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasChecksum()) {
return false;
}
if (!hasChunkOffset()) {
return false;
}
if (!getChecksum().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.ChecksumProto checksum = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto checksum_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder> checksumBuilder_;
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
public boolean hasChecksum() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getChecksum() {
if (checksumBuilder_ == null) {
return checksum_;
} else {
return checksumBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
public Builder setChecksum(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto value) {
if (checksumBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
checksum_ = value;
onChanged();
} else {
checksumBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
public Builder setChecksum(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder builderForValue) {
if (checksumBuilder_ == null) {
checksum_ = builderForValue.build();
onChanged();
} else {
checksumBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
public Builder mergeChecksum(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto value) {
if (checksumBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
checksum_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance()) {
checksum_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.newBuilder(checksum_).mergeFrom(value).buildPartial();
} else {
checksum_ = value;
}
onChanged();
} else {
checksumBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
public Builder clearChecksum() {
if (checksumBuilder_ == null) {
checksum_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance();
onChanged();
} else {
checksumBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder getChecksumBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getChecksumFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getChecksumOrBuilder() {
if (checksumBuilder_ != null) {
return checksumBuilder_.getMessageOrBuilder();
} else {
return checksum_;
}
}
/**
* required .hadoop.hdfs.ChecksumProto checksum = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder>
getChecksumFieldBuilder() {
if (checksumBuilder_ == null) {
checksumBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder>(
checksum_,
getParentForChildren(),
isClean());
checksum_ = null;
}
return checksumBuilder_;
}
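// The SingleFieldBuilder above is created lazily: until something calls
// getChecksumFieldBuilder() (for example via getChecksumBuilder()), the
// plain checksum_ message is used directly. A sketch of the nested-builder
// style this enables; the bytesPerChecksum field is an assumption about
// ChecksumProto, shown for illustration only:
//
//   ReadOpChecksumInfoProto.Builder b = ReadOpChecksumInfoProto.newBuilder();
//   b.getChecksumBuilder().setBytesPerChecksum(512); // assumed field
//   b.setChunkOffset(0L);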
// required uint64 chunkOffset = 2;
private long chunkOffset_ ;
/**
* required uint64 chunkOffset = 2;
*
* The offset into the block at which the first packet
* will start. This is necessary since reads will align
* backwards to a checksum chunk boundary.
*
*/
public boolean hasChunkOffset() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 chunkOffset = 2;
*
* The offset into the block at which the first packet
* will start. This is necessary since reads will align
* backwards to a checksum chunk boundary.
*
*/
public long getChunkOffset() {
return chunkOffset_;
}
/**
* required uint64 chunkOffset = 2;
*
* The offset into the block at which the first packet
* will start. This is necessary since reads will align
* backwards to a checksum chunk boundary.
*
*/
public Builder setChunkOffset(long value) {
bitField0_ |= 0x00000002;
chunkOffset_ = value;
onChanged();
return this;
}
/**
* required uint64 chunkOffset = 2;
*
* The offset into the block at which the first packet
* will start. This is necessary since reads will align
* backwards to a checksum chunk boundary.
*
*/
public Builder clearChunkOffset() {
bitField0_ = (bitField0_ & ~0x00000002);
chunkOffset_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ReadOpChecksumInfoProto)
}
static {
defaultInstance = new ReadOpChecksumInfoProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ReadOpChecksumInfoProto)
}
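// Minimal round-trip sketch for the message defined above (illustrative;
// `checksum` stands in for an already-built ChecksumProto):
//
//   ReadOpChecksumInfoProto info = ReadOpChecksumInfoProto.newBuilder()
//       .setChecksum(checksum)   // required field 1
//       .setChunkOffset(0L)      // required field 2
//       .build();                // throws if a required field is unset
//   byte[] wire = info.toByteArray();
//   ReadOpChecksumInfoProto parsed = ReadOpChecksumInfoProto.parseFrom(wire);
//   assert parsed.equals(info);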
public interface BlockOpResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.Status status = 1;
/**
* required .hadoop.hdfs.Status status = 1;
*/
boolean hasStatus();
/**
* required .hadoop.hdfs.Status status = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus();
// optional string firstBadLink = 2;
/**
* optional string firstBadLink = 2;
*/
boolean hasFirstBadLink();
/**
* optional string firstBadLink = 2;
*/
java.lang.String getFirstBadLink();
/**
* optional string firstBadLink = 2;
*/
com.google.protobuf.ByteString
getFirstBadLinkBytes();
// optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
boolean hasChecksumResponse();
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto getChecksumResponse();
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder getChecksumResponseOrBuilder();
// optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
boolean hasReadOpChecksumInfo();
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto getReadOpChecksumInfo();
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder getReadOpChecksumInfoOrBuilder();
// optional string message = 5;
/**
* optional string message = 5;
*
* explanatory text which may be useful to log on the client side
*/
boolean hasMessage();
/**
* optional string message = 5;
*
* explanatory text which may be useful to log on the client side
*/
java.lang.String getMessage();
/**
* optional string message = 5;
*
* explanatory text which may be useful to log on the client side
*/
com.google.protobuf.ByteString
getMessageBytes();
// optional uint32 shortCircuitAccessVersion = 6;
/**
* optional uint32 shortCircuitAccessVersion = 6;
*
* If the server chooses to agree to the request of a client for
* short-circuit access, it will send a response message with the relevant
* file descriptors attached.
*
* In the body of the message, this version number will be set to the
* specific version number of the block data that the client is about to
* read.
*
*/
boolean hasShortCircuitAccessVersion();
/**
* optional uint32 shortCircuitAccessVersion = 6;
*
* If the server chooses to agree to the request of a client for
* short-circuit access, it will send a response message with the relevant
* file descriptors attached.
*
* In the body of the message, this version number will be set to the
* specific version number of the block data that the client is about to
* read.
*
*/
int getShortCircuitAccessVersion();
}
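// Reading through this interface follows the usual optional-field
// discipline: check hasX() before relying on getX(), because an unset
// optional field just returns its default value. Hedged sketch with a
// hypothetical `response`:
//
//   if (response.getStatus() != Status.SUCCESS) {
//     String detail = response.hasMessage() ? response.getMessage() : "";
//     // also worth inspecting: response.hasFirstBadLink()
//   }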
/**
* Protobuf type {@code hadoop.hdfs.BlockOpResponseProto}
*/
public static final class BlockOpResponseProto extends
com.google.protobuf.GeneratedMessage
implements BlockOpResponseProtoOrBuilder {
// Use BlockOpResponseProto.newBuilder() to construct.
private BlockOpResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private BlockOpResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final BlockOpResponseProto defaultInstance;
public static BlockOpResponseProto getDefaultInstance() {
return defaultInstance;
}
public BlockOpResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private BlockOpResponseProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
status_ = value;
}
break;
}
case 18: {
bitField0_ |= 0x00000002;
firstBadLink_ = input.readBytes();
break;
}
case 26: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) == 0x00000004)) {
subBuilder = checksumResponse_.toBuilder();
}
checksumResponse_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(checksumResponse_);
checksumResponse_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
case 34: {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000008) == 0x00000008)) {
subBuilder = readOpChecksumInfo_.toBuilder();
}
readOpChecksumInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(readOpChecksumInfo_);
readOpChecksumInfo_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000008;
break;
}
case 42: {
bitField0_ |= 0x00000010;
message_ = input.readBytes();
break;
}
case 48: {
bitField0_ |= 0x00000020;
shortCircuitAccessVersion_ = input.readUInt32();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BlockOpResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BlockOpResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.Builder.class);
}
public static com.google.protobuf.Parser<BlockOpResponseProto> PARSER =
new com.google.protobuf.AbstractParser<BlockOpResponseProto>() {
public BlockOpResponseProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new BlockOpResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<BlockOpResponseProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.Status status = 1;
public static final int STATUS_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status status_;
/**
* required .hadoop.hdfs.Status status = 1;
*/
public boolean hasStatus() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
return status_;
}
// optional string firstBadLink = 2;
public static final int FIRSTBADLINK_FIELD_NUMBER = 2;
private java.lang.Object firstBadLink_;
/**
* optional string firstBadLink = 2;
*/
public boolean hasFirstBadLink() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional string firstBadLink = 2;
*/
public java.lang.String getFirstBadLink() {
java.lang.Object ref = firstBadLink_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
firstBadLink_ = s;
}
return s;
}
}
/**
* optional string firstBadLink = 2;
*/
public com.google.protobuf.ByteString
getFirstBadLinkBytes() {
java.lang.Object ref = firstBadLink_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
firstBadLink_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
public static final int CHECKSUMRESPONSE_FIELD_NUMBER = 3;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto checksumResponse_;
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
public boolean hasChecksumResponse() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto getChecksumResponse() {
return checksumResponse_;
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder getChecksumResponseOrBuilder() {
return checksumResponse_;
}
// optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
public static final int READOPCHECKSUMINFO_FIELD_NUMBER = 4;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto readOpChecksumInfo_;
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
public boolean hasReadOpChecksumInfo() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto getReadOpChecksumInfo() {
return readOpChecksumInfo_;
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder getReadOpChecksumInfoOrBuilder() {
return readOpChecksumInfo_;
}
// optional string message = 5;
public static final int MESSAGE_FIELD_NUMBER = 5;
private java.lang.Object message_;
/**
* optional string message = 5;
*
* explanatory text which may be useful to log on the client side
*/
public boolean hasMessage() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional string message = 5;
*
* explanatory text which may be useful to log on the client side
*/
public java.lang.String getMessage() {
java.lang.Object ref = message_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
message_ = s;
}
return s;
}
}
/**
* optional string message = 5;
*
* explanatory text which may be useful to log on the client side
*/
public com.google.protobuf.ByteString
getMessageBytes() {
java.lang.Object ref = message_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
message_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// optional uint32 shortCircuitAccessVersion = 6;
public static final int SHORTCIRCUITACCESSVERSION_FIELD_NUMBER = 6;
private int shortCircuitAccessVersion_;
/**
* optional uint32 shortCircuitAccessVersion = 6;
*
* If the server chooses to agree to the request of a client for
* short-circuit access, it will send a response message with the relevant
* file descriptors attached.
*
* In the body of the message, this version number will be set to the
* specific version number of the block data that the client is about to
* read.
*
*/
public boolean hasShortCircuitAccessVersion() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional uint32 shortCircuitAccessVersion = 6;
*
* If the server chooses to agree to the request of a client for
* short-circuit access, it will send a response message with the relevant
* file descriptors attached.
*
* In the body of the message, this version number will be set to the
* specific version number of the block data that the client is about to
* read.
*
*/
public int getShortCircuitAccessVersion() {
return shortCircuitAccessVersion_;
}
private void initFields() {
status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
firstBadLink_ = "";
checksumResponse_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance();
readOpChecksumInfo_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance();
message_ = "";
shortCircuitAccessVersion_ = 0;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasStatus()) {
memoizedIsInitialized = 0;
return false;
}
if (hasChecksumResponse()) {
if (!getChecksumResponse().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasReadOpChecksumInfo()) {
if (!getReadOpChecksumInfo().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
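// isInitialized() above is what separates build() from buildPartial():
// build() consults it and throws an UninitializedMessageException when the
// required status field is missing (or a present sub-message is itself
// incomplete), while buildPartial() skips the check. Sketch (illustrative):
//
//   BlockOpResponseProto.Builder b = BlockOpResponseProto.newBuilder();
//   BlockOpResponseProto partial = b.buildPartial(); // fine, status unset
//   // b.build() here would throw, because hasStatus() is false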
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, status_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getFirstBadLinkBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeMessage(3, checksumResponse_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeMessage(4, readOpChecksumInfo_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeBytes(5, getMessageBytes());
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeUInt32(6, shortCircuitAccessVersion_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, status_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getFirstBadLinkBytes());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, checksumResponse_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(4, readOpChecksumInfo_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(5, getMessageBytes());
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(6, shortCircuitAccessVersion_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto) obj;
boolean result = true;
result = result && (hasStatus() == other.hasStatus());
if (hasStatus()) {
result = result &&
(getStatus() == other.getStatus());
}
result = result && (hasFirstBadLink() == other.hasFirstBadLink());
if (hasFirstBadLink()) {
result = result && getFirstBadLink()
.equals(other.getFirstBadLink());
}
result = result && (hasChecksumResponse() == other.hasChecksumResponse());
if (hasChecksumResponse()) {
result = result && getChecksumResponse()
.equals(other.getChecksumResponse());
}
result = result && (hasReadOpChecksumInfo() == other.hasReadOpChecksumInfo());
if (hasReadOpChecksumInfo()) {
result = result && getReadOpChecksumInfo()
.equals(other.getReadOpChecksumInfo());
}
result = result && (hasMessage() == other.hasMessage());
if (hasMessage()) {
result = result && getMessage()
.equals(other.getMessage());
}
result = result && (hasShortCircuitAccessVersion() == other.hasShortCircuitAccessVersion());
if (hasShortCircuitAccessVersion()) {
result = result && (getShortCircuitAccessVersion()
== other.getShortCircuitAccessVersion());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasStatus()) {
hash = (37 * hash) + STATUS_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getStatus());
}
if (hasFirstBadLink()) {
hash = (37 * hash) + FIRSTBADLINK_FIELD_NUMBER;
hash = (53 * hash) + getFirstBadLink().hashCode();
}
if (hasChecksumResponse()) {
hash = (37 * hash) + CHECKSUMRESPONSE_FIELD_NUMBER;
hash = (53 * hash) + getChecksumResponse().hashCode();
}
if (hasReadOpChecksumInfo()) {
hash = (37 * hash) + READOPCHECKSUMINFO_FIELD_NUMBER;
hash = (53 * hash) + getReadOpChecksumInfo().hashCode();
}
if (hasMessage()) {
hash = (37 * hash) + MESSAGE_FIELD_NUMBER;
hash = (53 * hash) + getMessage().hashCode();
}
if (hasShortCircuitAccessVersion()) {
hash = (37 * hash) + SHORTCIRCUITACCESSVERSION_FIELD_NUMBER;
hash = (53 * hash) + getShortCircuitAccessVersion();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.BlockOpResponseProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BlockOpResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BlockOpResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getChecksumResponseFieldBuilder();
getReadOpChecksumInfoFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
bitField0_ = (bitField0_ & ~0x00000001);
firstBadLink_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
if (checksumResponseBuilder_ == null) {
checksumResponse_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance();
} else {
checksumResponseBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
if (readOpChecksumInfoBuilder_ == null) {
readOpChecksumInfo_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance();
} else {
readOpChecksumInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
message_ = "";
bitField0_ = (bitField0_ & ~0x00000010);
shortCircuitAccessVersion_ = 0;
bitField0_ = (bitField0_ & ~0x00000020);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BlockOpResponseProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.status_ = status_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.firstBadLink_ = firstBadLink_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
if (checksumResponseBuilder_ == null) {
result.checksumResponse_ = checksumResponse_;
} else {
result.checksumResponse_ = checksumResponseBuilder_.build();
}
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
if (readOpChecksumInfoBuilder_ == null) {
result.readOpChecksumInfo_ = readOpChecksumInfo_;
} else {
result.readOpChecksumInfo_ = readOpChecksumInfoBuilder_.build();
}
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.message_ = message_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000020;
}
result.shortCircuitAccessVersion_ = shortCircuitAccessVersion_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
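// In buildPartial() above, from_bitField0_ carries the builder's presence
// bits into the message's bitField0_: 0x01 status, 0x02 firstBadLink,
// 0x04 checksumResponse, 0x08 readOpChecksumInfo, 0x10 message,
// 0x20 shortCircuitAccessVersion. Copying the bit instead of testing the
// stored value is what lets an explicitly set default (for example
// setShortCircuitAccessVersion(0)) still report
// hasShortCircuitAccessVersion() as true after the message is built.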
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.getDefaultInstance()) return this;
if (other.hasStatus()) {
setStatus(other.getStatus());
}
if (other.hasFirstBadLink()) {
bitField0_ |= 0x00000002;
firstBadLink_ = other.firstBadLink_;
onChanged();
}
if (other.hasChecksumResponse()) {
mergeChecksumResponse(other.getChecksumResponse());
}
if (other.hasReadOpChecksumInfo()) {
mergeReadOpChecksumInfo(other.getReadOpChecksumInfo());
}
if (other.hasMessage()) {
bitField0_ |= 0x00000010;
message_ = other.message_;
onChanged();
}
if (other.hasShortCircuitAccessVersion()) {
setShortCircuitAccessVersion(other.getShortCircuitAccessVersion());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasStatus()) {
return false;
}
if (hasChecksumResponse()) {
if (!getChecksumResponse().isInitialized()) {
return false;
}
}
if (hasReadOpChecksumInfo()) {
if (!getReadOpChecksumInfo().isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.Status status = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
/**
* required .hadoop.hdfs.Status status = 1;
*/
public boolean hasStatus() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
return status_;
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
status_ = value;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public Builder clearStatus() {
bitField0_ = (bitField0_ & ~0x00000001);
status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
onChanged();
return this;
}
// optional string firstBadLink = 2;
private java.lang.Object firstBadLink_ = "";
/**
* optional string firstBadLink = 2;
*/
public boolean hasFirstBadLink() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional string firstBadLink = 2;
*/
public java.lang.String getFirstBadLink() {
java.lang.Object ref = firstBadLink_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
firstBadLink_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string firstBadLink = 2;
*/
public com.google.protobuf.ByteString
getFirstBadLinkBytes() {
java.lang.Object ref = firstBadLink_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
firstBadLink_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* optional string firstBadLink = 2;
*/
public Builder setFirstBadLink(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
firstBadLink_ = value;
onChanged();
return this;
}
/**
* optional string firstBadLink = 2;
*/
public Builder clearFirstBadLink() {
bitField0_ = (bitField0_ & ~0x00000002);
firstBadLink_ = getDefaultInstance().getFirstBadLink();
onChanged();
return this;
}
/**
* optional string firstBadLink = 2;
*/
public Builder setFirstBadLinkBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
firstBadLink_ = value;
onChanged();
return this;
}
// optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto checksumResponse_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder> checksumResponseBuilder_;
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
public boolean hasChecksumResponse() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto getChecksumResponse() {
if (checksumResponseBuilder_ == null) {
return checksumResponse_;
} else {
return checksumResponseBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
public Builder setChecksumResponse(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto value) {
if (checksumResponseBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
checksumResponse_ = value;
onChanged();
} else {
checksumResponseBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
public Builder setChecksumResponse(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder builderForValue) {
if (checksumResponseBuilder_ == null) {
checksumResponse_ = builderForValue.build();
onChanged();
} else {
checksumResponseBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
public Builder mergeChecksumResponse(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto value) {
if (checksumResponseBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004) &&
checksumResponse_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance()) {
checksumResponse_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.newBuilder(checksumResponse_).mergeFrom(value).buildPartial();
} else {
checksumResponse_ = value;
}
onChanged();
} else {
checksumResponseBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
public Builder clearChecksumResponse() {
if (checksumResponseBuilder_ == null) {
checksumResponse_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance();
onChanged();
} else {
checksumResponseBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder getChecksumResponseBuilder() {
bitField0_ |= 0x00000004;
onChanged();
return getChecksumResponseFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder getChecksumResponseOrBuilder() {
if (checksumResponseBuilder_ != null) {
return checksumResponseBuilder_.getMessageOrBuilder();
} else {
return checksumResponse_;
}
}
/**
* optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder>
getChecksumResponseFieldBuilder() {
if (checksumResponseBuilder_ == null) {
checksumResponseBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder>(
checksumResponse_,
getParentForChildren(),
isClean());
checksumResponse_ = null;
}
return checksumResponseBuilder_;
}
// optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto readOpChecksumInfo_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder> readOpChecksumInfoBuilder_;
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
public boolean hasReadOpChecksumInfo() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto getReadOpChecksumInfo() {
if (readOpChecksumInfoBuilder_ == null) {
return readOpChecksumInfo_;
} else {
return readOpChecksumInfoBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
public Builder setReadOpChecksumInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto value) {
if (readOpChecksumInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
readOpChecksumInfo_ = value;
onChanged();
} else {
readOpChecksumInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
public Builder setReadOpChecksumInfo(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder builderForValue) {
if (readOpChecksumInfoBuilder_ == null) {
readOpChecksumInfo_ = builderForValue.build();
onChanged();
} else {
readOpChecksumInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
public Builder mergeReadOpChecksumInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto value) {
if (readOpChecksumInfoBuilder_ == null) {
if (((bitField0_ & 0x00000008) == 0x00000008) &&
readOpChecksumInfo_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance()) {
readOpChecksumInfo_ =
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.newBuilder(readOpChecksumInfo_).mergeFrom(value).buildPartial();
} else {
readOpChecksumInfo_ = value;
}
onChanged();
} else {
readOpChecksumInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
public Builder clearReadOpChecksumInfo() {
if (readOpChecksumInfoBuilder_ == null) {
readOpChecksumInfo_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance();
onChanged();
} else {
readOpChecksumInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder getReadOpChecksumInfoBuilder() {
bitField0_ |= 0x00000008;
onChanged();
return getReadOpChecksumInfoFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder getReadOpChecksumInfoOrBuilder() {
if (readOpChecksumInfoBuilder_ != null) {
return readOpChecksumInfoBuilder_.getMessageOrBuilder();
} else {
return readOpChecksumInfo_;
}
}
/**
* optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder>
getReadOpChecksumInfoFieldBuilder() {
if (readOpChecksumInfoBuilder_ == null) {
readOpChecksumInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder>(
readOpChecksumInfo_,
getParentForChildren(),
isClean());
readOpChecksumInfo_ = null;
}
return readOpChecksumInfoBuilder_;
}
// optional string message = 5;
private java.lang.Object message_ = "";
/**
* optional string message = 5;
*
* explanatory text which may be useful to log on the client side
*/
public boolean hasMessage() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional string message = 5;
*
* explanatory text which may be useful to log on the client side
*/
public java.lang.String getMessage() {
java.lang.Object ref = message_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
message_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string message = 5;
*
* explanatory text which may be useful to log on the client side
*/
public com.google.protobuf.ByteString
getMessageBytes() {
java.lang.Object ref = message_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
message_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
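// Note on the two accessors above: message_ holds either a String or a
// ByteString. getMessage() lazily decodes UTF-8 bytes to a String and
// caches the result back into message_; getMessageBytes() performs the
// reverse conversion and caches likewise, so each direction is computed
// at most once per builder.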
/**
* optional string message = 5;
*
* explanatory text which may be useful to log on the client side
*/
public Builder setMessage(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000010;
message_ = value;
onChanged();
return this;
}
/**
* optional string message = 5;
*
* explanatory text which may be useful to log on the client side
*/
public Builder clearMessage() {
bitField0_ = (bitField0_ & ~0x00000010);
message_ = getDefaultInstance().getMessage();
onChanged();
return this;
}
/**
* optional string message = 5;
*
* explanatory text which may be useful to log on the client side
*/
public Builder setMessageBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000010;
message_ = value;
onChanged();
return this;
}
// optional uint32 shortCircuitAccessVersion = 6;
private int shortCircuitAccessVersion_ ;
/**
* optional uint32 shortCircuitAccessVersion = 6;
*
* If the server chooses to agree to the request of a client for
* short-circuit access, it will send a response message with the relevant
* file descriptors attached.
*
* In the body of the message, this version number will be set to the
* specific version number of the block data that the client is about to
* read.
*/
public boolean hasShortCircuitAccessVersion() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional uint32 shortCircuitAccessVersion = 6;
*
* If the server chooses to agree to the request of a client for
* short-circuit access, it will send a response message with the relevant
* file descriptors attached.
*
* In the body of the message, this version number will be set to the
* specific version number of the block data that the client is about to
* read.
*/
public int getShortCircuitAccessVersion() {
return shortCircuitAccessVersion_;
}
/**
* optional uint32 shortCircuitAccessVersion = 6;
*
* If the server chooses to agree to the request of a client for
* short-circuit access, it will send a response message with the relevant
* file descriptors attached.
*
* In the body of the message, this version number will be set to the
* specific version number of the block data that the client is about to
* read.
*/
public Builder setShortCircuitAccessVersion(int value) {
bitField0_ |= 0x00000020;
shortCircuitAccessVersion_ = value;
onChanged();
return this;
}
/**
* optional uint32 shortCircuitAccessVersion = 6;
*
* If the server chooses to agree to the request of a client for
* short-circuit access, it will send a response message with the relevant
* file descriptors attached.
*
* In the body of the message, this version number will be set to the
* specific version number of the block data that the client is about to
* read.
*/
public Builder clearShortCircuitAccessVersion() {
bitField0_ = (bitField0_ & ~0x00000020);
shortCircuitAccessVersion_ = 0;
onChanged();
return this;
}
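// A minimal sketch of the exchange described in the javadoc above (the
// version number 1 is assumed, purely for illustration): a server that
// grants short-circuit access would stamp the block-data version into
// its response before handing file descriptors to the client:
//
//   BlockOpResponseProto.newBuilder()
//       .setStatus(Status.SUCCESS)
//       .setShortCircuitAccessVersion(1)
//       .build();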
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlockOpResponseProto)
}
static {
defaultInstance = new BlockOpResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.BlockOpResponseProto)
}
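// Usage sketch (the stream name is assumed): a responder could build and
// send a complete BlockOpResponseProto in one chain; writeDelimitedTo
// prefixes the message with its varint length so the peer can frame it:
//
//   BlockOpResponseProto response = BlockOpResponseProto.newBuilder()
//       .setStatus(Status.SUCCESS)
//       .setMessage("served from local replica")
//       .build();
//   response.writeDelimitedTo(socketOut);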
public interface ClientReadStatusProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.Status status = 1;
/**
* required .hadoop.hdfs.Status status = 1;
*/
boolean hasStatus();
/**
* required .hadoop.hdfs.Status status = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus();
}
/**
* Protobuf type {@code hadoop.hdfs.ClientReadStatusProto}
*
* Message sent from the client to the DN after reading the entire
* read request.
*/
public static final class ClientReadStatusProto extends
com.google.protobuf.GeneratedMessage
implements ClientReadStatusProtoOrBuilder {
// Use ClientReadStatusProto.newBuilder() to construct.
private ClientReadStatusProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ClientReadStatusProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ClientReadStatusProto defaultInstance;
public static ClientReadStatusProto getDefaultInstance() {
return defaultInstance;
}
public ClientReadStatusProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ClientReadStatusProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
status_ = value;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
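// The switch above dispatches on the raw wire tag, where
// tag == (fieldNumber << 3) | wireType. Tag 8 is (1 << 3) | 0, i.e.
// field 1 (status) as a varint; tag 0 means end of input; any other tag
// falls through to parseUnknownField and is preserved in unknownFields.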
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientReadStatusProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientReadStatusProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.Builder.class);
}
public static com.google.protobuf.Parser<ClientReadStatusProto> PARSER =
new com.google.protobuf.AbstractParser<ClientReadStatusProto>() {
public ClientReadStatusProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ClientReadStatusProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<ClientReadStatusProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.Status status = 1;
public static final int STATUS_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status status_;
/**
* required .hadoop.hdfs.Status status = 1;
*/
public boolean hasStatus() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
return status_;
}
private void initFields() {
status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasStatus()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, status_.getNumber());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, status_.getNumber());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto) obj;
boolean result = true;
result = result && (hasStatus() == other.hasStatus());
if (hasStatus()) {
result = result &&
(getStatus() == other.getStatus());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasStatus()) {
hash = (37 * hash) + STATUS_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getStatus());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
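// Both getSerializedSize() and hashCode() memoize their result (-1 and 0
// respectively mark "not yet computed"); the message is immutable once
// built, so recomputation is never needed, and a hash that happened to
// come out 0 would simply be recomputed on the next call.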
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ClientReadStatusProto}
*
* Message sent from the client to the DN after reading the entire
* read request.
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientReadStatusProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientReadStatusProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientReadStatusProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.status_ = status_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.getDefaultInstance()) return this;
if (other.hasStatus()) {
setStatus(other.getStatus());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasStatus()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.Status status = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
/**
* required .hadoop.hdfs.Status status = 1;
*/
public boolean hasStatus() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
return status_;
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
status_ = value;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public Builder clearStatus() {
bitField0_ = (bitField0_ & ~0x00000001);
status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ClientReadStatusProto)
}
static {
defaultInstance = new ClientReadStatusProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ClientReadStatusProto)
}
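// Usage sketch (the in/out streams are assumed): after consuming an
// entire read response, a client could acknowledge the DataNode, which
// reads the ack back off the wire:
//
//   // client side
//   ClientReadStatusProto.newBuilder()
//       .setStatus(Status.CHECKSUM_OK)
//       .build()
//       .writeDelimitedTo(out);
//
//   // DataNode side
//   ClientReadStatusProto ack = ClientReadStatusProto.parseDelimitedFrom(in);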
public interface DNTransferAckProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.hdfs.Status status = 1;
/**
* required .hadoop.hdfs.Status status = 1;
*/
boolean hasStatus();
/**
* required .hadoop.hdfs.Status status = 1;
*/
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus();
}
/**
* Protobuf type {@code hadoop.hdfs.DNTransferAckProto}
*/
public static final class DNTransferAckProto extends
com.google.protobuf.GeneratedMessage
implements DNTransferAckProtoOrBuilder {
// Use DNTransferAckProto.newBuilder() to construct.
private DNTransferAckProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private DNTransferAckProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final DNTransferAckProto defaultInstance;
public static DNTransferAckProto getDefaultInstance() {
return defaultInstance;
}
public DNTransferAckProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DNTransferAckProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
status_ = value;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DNTransferAckProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DNTransferAckProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.Builder.class);
}
public static com.google.protobuf.Parser<DNTransferAckProto> PARSER =
new com.google.protobuf.AbstractParser<DNTransferAckProto>() {
public DNTransferAckProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new DNTransferAckProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<DNTransferAckProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.hdfs.Status status = 1;
public static final int STATUS_FIELD_NUMBER = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status status_;
/**
* required .hadoop.hdfs.Status status = 1;
*/
public boolean hasStatus() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
return status_;
}
private void initFields() {
status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasStatus()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, status_.getNumber());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, status_.getNumber());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto) obj;
boolean result = true;
result = result && (hasStatus() == other.hasStatus());
if (hasStatus()) {
result = result &&
(getStatus() == other.getStatus());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasStatus()) {
hash = (37 * hash) + STATUS_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getStatus());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.DNTransferAckProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DNTransferAckProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DNTransferAckProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DNTransferAckProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.status_ = status_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.getDefaultInstance()) return this;
if (other.hasStatus()) {
setStatus(other.getStatus());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasStatus()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.hdfs.Status status = 1;
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
/**
* required .hadoop.hdfs.Status status = 1;
*/
public boolean hasStatus() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
return status_;
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
status_ = value;
onChanged();
return this;
}
/**
* required .hadoop.hdfs.Status status = 1;
*/
public Builder clearStatus() {
bitField0_ = (bitField0_ & ~0x00000001);
status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DNTransferAckProto)
}
static {
defaultInstance = new DNTransferAckProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DNTransferAckProto)
}
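// Usage sketch (stream name assumed): the target DataNode of a transfer
// acknowledges with the single required status field; calling build()
// without setStatus(...) would throw, because isInitialized() reports
// the message incomplete:
//
//   DNTransferAckProto.newBuilder()
//       .setStatus(Status.SUCCESS)
//       .build()
//       .writeDelimitedTo(out);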
public interface OpBlockChecksumResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required uint32 bytesPerCrc = 1;
/**
* required uint32 bytesPerCrc = 1;
*/
boolean hasBytesPerCrc();
/**
* required uint32 bytesPerCrc = 1;
*/
int getBytesPerCrc();
// required uint64 crcPerBlock = 2;
/**
* required uint64 crcPerBlock = 2;
*/
boolean hasCrcPerBlock();
/**
* required uint64 crcPerBlock = 2;
*/
long getCrcPerBlock();
// required bytes blockChecksum = 3;
/**
* required bytes blockChecksum = 3;
*/
boolean hasBlockChecksum();
/**
* required bytes blockChecksum = 3;
*/
com.google.protobuf.ByteString getBlockChecksum();
// optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;
/**
* optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;
*/
boolean hasCrcType();
/**
* optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getCrcType();
// optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
boolean hasBlockChecksumOptions();
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions();
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.OpBlockChecksumResponseProto}
*/
public static final class OpBlockChecksumResponseProto extends
com.google.protobuf.GeneratedMessage
implements OpBlockChecksumResponseProtoOrBuilder {
// Use OpBlockChecksumResponseProto.newBuilder() to construct.
private OpBlockChecksumResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private OpBlockChecksumResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final OpBlockChecksumResponseProto defaultInstance;
public static OpBlockChecksumResponseProto getDefaultInstance() {
return defaultInstance;
}
public OpBlockChecksumResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private OpBlockChecksumResponseProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
bytesPerCrc_ = input.readUInt32();
break;
}
case 16: {
bitField0_ |= 0x00000002;
crcPerBlock_ = input.readUInt64();
break;
}
case 26: {
bitField0_ |= 0x00000004;
blockChecksum_ = input.readBytes();
break;
}
case 32: {
int rawValue = input.readEnum();
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(4, rawValue);
} else {
bitField0_ |= 0x00000008;
crcType_ = value;
}
break;
}
case 42: {
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000010) == 0x00000010)) {
subBuilder = blockChecksumOptions_.toBuilder();
}
blockChecksumOptions_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(blockChecksumOptions_);
blockChecksumOptions_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000010;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
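// The tag values handled above decode as (fieldNumber << 3) | wireType:
//   8  = field 1, varint           (bytesPerCrc, uint32)
//   16 = field 2, varint           (crcPerBlock, uint64)
//   26 = field 3, length-delimited (blockChecksum, bytes)
//   32 = field 4, varint           (crcType, enum)
//   42 = field 5, length-delimited (blockChecksumOptions, sub-message)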
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder.class);
}
public static com.google.protobuf.Parser<OpBlockChecksumResponseProto> PARSER =
new com.google.protobuf.AbstractParser<OpBlockChecksumResponseProto>() {
public OpBlockChecksumResponseProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new OpBlockChecksumResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<OpBlockChecksumResponseProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required uint32 bytesPerCrc = 1;
public static final int BYTESPERCRC_FIELD_NUMBER = 1;
private int bytesPerCrc_;
/**
* required uint32 bytesPerCrc = 1;
*/
public boolean hasBytesPerCrc() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint32 bytesPerCrc = 1;
*/
public int getBytesPerCrc() {
return bytesPerCrc_;
}
// required uint64 crcPerBlock = 2;
public static final int CRCPERBLOCK_FIELD_NUMBER = 2;
private long crcPerBlock_;
/**
* required uint64 crcPerBlock = 2;
*/
public boolean hasCrcPerBlock() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 crcPerBlock = 2;
*/
public long getCrcPerBlock() {
return crcPerBlock_;
}
// required bytes blockChecksum = 3;
public static final int BLOCKCHECKSUM_FIELD_NUMBER = 3;
private com.google.protobuf.ByteString blockChecksum_;
/**
* required bytes blockChecksum = 3;
*/
public boolean hasBlockChecksum() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required bytes blockChecksum = 3;
*/
public com.google.protobuf.ByteString getBlockChecksum() {
return blockChecksum_;
}
// optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;
public static final int CRCTYPE_FIELD_NUMBER = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto crcType_;
/**
* optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;
*/
public boolean hasCrcType() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getCrcType() {
return crcType_;
}
// optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
public static final int BLOCKCHECKSUMOPTIONS_FIELD_NUMBER = 5;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto blockChecksumOptions_;
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
public boolean hasBlockChecksumOptions() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions() {
return blockChecksumOptions_;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder() {
return blockChecksumOptions_;
}
private void initFields() {
bytesPerCrc_ = 0;
crcPerBlock_ = 0L;
blockChecksum_ = com.google.protobuf.ByteString.EMPTY;
crcType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL;
blockChecksumOptions_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasBytesPerCrc()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasCrcPerBlock()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasBlockChecksum()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt32(1, bytesPerCrc_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, crcPerBlock_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, blockChecksum_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeEnum(4, crcType_.getNumber());
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeMessage(5, blockChecksumOptions_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(1, bytesPerCrc_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, crcPerBlock_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, blockChecksum_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(4, crcType_.getNumber());
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(5, blockChecksumOptions_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto) obj;
boolean result = true;
result = result && (hasBytesPerCrc() == other.hasBytesPerCrc());
if (hasBytesPerCrc()) {
result = result && (getBytesPerCrc()
== other.getBytesPerCrc());
}
result = result && (hasCrcPerBlock() == other.hasCrcPerBlock());
if (hasCrcPerBlock()) {
result = result && (getCrcPerBlock()
== other.getCrcPerBlock());
}
result = result && (hasBlockChecksum() == other.hasBlockChecksum());
if (hasBlockChecksum()) {
result = result && getBlockChecksum()
.equals(other.getBlockChecksum());
}
result = result && (hasCrcType() == other.hasCrcType());
if (hasCrcType()) {
result = result &&
(getCrcType() == other.getCrcType());
}
result = result && (hasBlockChecksumOptions() == other.hasBlockChecksumOptions());
if (hasBlockChecksumOptions()) {
result = result && getBlockChecksumOptions()
.equals(other.getBlockChecksumOptions());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasBytesPerCrc()) {
hash = (37 * hash) + BYTESPERCRC_FIELD_NUMBER;
hash = (53 * hash) + getBytesPerCrc();
}
if (hasCrcPerBlock()) {
hash = (37 * hash) + CRCPERBLOCK_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getCrcPerBlock());
}
if (hasBlockChecksum()) {
hash = (37 * hash) + BLOCKCHECKSUM_FIELD_NUMBER;
hash = (53 * hash) + getBlockChecksum().hashCode();
}
if (hasCrcType()) {
hash = (37 * hash) + CRCTYPE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getCrcType());
}
if (hasBlockChecksumOptions()) {
hash = (37 * hash) + BLOCKCHECKSUMOPTIONS_FIELD_NUMBER;
hash = (53 * hash) + getBlockChecksumOptions().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.OpBlockChecksumResponseProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getBlockChecksumOptionsFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
bytesPerCrc_ = 0;
bitField0_ = (bitField0_ & ~0x00000001);
crcPerBlock_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
blockChecksum_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000004);
crcType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL;
bitField0_ = (bitField0_ & ~0x00000008);
if (blockChecksumOptionsBuilder_ == null) {
blockChecksumOptions_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance();
} else {
blockChecksumOptionsBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.bytesPerCrc_ = bytesPerCrc_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.crcPerBlock_ = crcPerBlock_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.blockChecksum_ = blockChecksum_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.crcType_ = crcType_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
if (blockChecksumOptionsBuilder_ == null) {
result.blockChecksumOptions_ = blockChecksumOptions_;
} else {
result.blockChecksumOptions_ = blockChecksumOptionsBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance()) return this;
if (other.hasBytesPerCrc()) {
setBytesPerCrc(other.getBytesPerCrc());
}
if (other.hasCrcPerBlock()) {
setCrcPerBlock(other.getCrcPerBlock());
}
if (other.hasBlockChecksum()) {
setBlockChecksum(other.getBlockChecksum());
}
if (other.hasCrcType()) {
setCrcType(other.getCrcType());
}
if (other.hasBlockChecksumOptions()) {
mergeBlockChecksumOptions(other.getBlockChecksumOptions());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasBytesPerCrc()) {
return false;
}
if (!hasCrcPerBlock()) {
return false;
}
if (!hasBlockChecksum()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required uint32 bytesPerCrc = 1;
private int bytesPerCrc_ ;
/**
* required uint32 bytesPerCrc = 1;
*/
public boolean hasBytesPerCrc() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required uint32 bytesPerCrc = 1;
*/
public int getBytesPerCrc() {
return bytesPerCrc_;
}
/**
* required uint32 bytesPerCrc = 1;
*/
public Builder setBytesPerCrc(int value) {
bitField0_ |= 0x00000001;
bytesPerCrc_ = value;
onChanged();
return this;
}
/**
* required uint32 bytesPerCrc = 1;
*/
public Builder clearBytesPerCrc() {
bitField0_ = (bitField0_ & ~0x00000001);
bytesPerCrc_ = 0;
onChanged();
return this;
}
// required uint64 crcPerBlock = 2;
private long crcPerBlock_ ;
/**
* required uint64 crcPerBlock = 2;
*/
public boolean hasCrcPerBlock() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required uint64 crcPerBlock = 2;
*/
public long getCrcPerBlock() {
return crcPerBlock_;
}
/**
* required uint64 crcPerBlock = 2;
*/
public Builder setCrcPerBlock(long value) {
bitField0_ |= 0x00000002;
crcPerBlock_ = value;
onChanged();
return this;
}
/**
* required uint64 crcPerBlock = 2;
*/
public Builder clearCrcPerBlock() {
bitField0_ = (bitField0_ & ~0x00000002);
crcPerBlock_ = 0L;
onChanged();
return this;
}
// required bytes blockChecksum = 3;
private com.google.protobuf.ByteString blockChecksum_ = com.google.protobuf.ByteString.EMPTY;
/**
* required bytes blockChecksum = 3;
*/
public boolean hasBlockChecksum() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* required bytes blockChecksum = 3;
*/
public com.google.protobuf.ByteString getBlockChecksum() {
return blockChecksum_;
}
/**
* required bytes blockChecksum = 3;
*/
public Builder setBlockChecksum(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
blockChecksum_ = value;
onChanged();
return this;
}
/**
* required bytes blockChecksum = 3;
*/
public Builder clearBlockChecksum() {
bitField0_ = (bitField0_ & ~0x00000004);
blockChecksum_ = getDefaultInstance().getBlockChecksum();
onChanged();
return this;
}
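/*
* blockChecksum travels as an immutable com.google.protobuf.ByteString, so
* the setter only needs a null check; neither set nor get requires a
* defensive copy of the checksum bytes.
*/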
// optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto crcType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL;
/**
* optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;
*/
public boolean hasCrcType() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getCrcType() {
return crcType_;
}
/**
* optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;
*/
public Builder setCrcType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
crcType_ = value;
onChanged();
return this;
}
/**
* optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;
*/
public Builder clearCrcType() {
bitField0_ = (bitField0_ & ~0x00000008);
crcType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL;
onChanged();
return this;
}
// optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto blockChecksumOptions_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder> blockChecksumOptionsBuilder_;
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
public boolean hasBlockChecksumOptions() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions() {
if (blockChecksumOptionsBuilder_ == null) {
return blockChecksumOptions_;
} else {
return blockChecksumOptionsBuilder_.getMessage();
}
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
public Builder setBlockChecksumOptions(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto value) {
if (blockChecksumOptionsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
blockChecksumOptions_ = value;
onChanged();
} else {
blockChecksumOptionsBuilder_.setMessage(value);
}
bitField0_ |= 0x00000010;
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
public Builder setBlockChecksumOptions(
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder builderForValue) {
if (blockChecksumOptionsBuilder_ == null) {
blockChecksumOptions_ = builderForValue.build();
onChanged();
} else {
blockChecksumOptionsBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000010;
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
public Builder mergeBlockChecksumOptions(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto value) {
if (blockChecksumOptionsBuilder_ == null) {
if (((bitField0_ & 0x00000010) == 0x00000010) &&
blockChecksumOptions_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance()) {
blockChecksumOptions_ =
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.newBuilder(blockChecksumOptions_).mergeFrom(value).buildPartial();
} else {
blockChecksumOptions_ = value;
}
onChanged();
} else {
blockChecksumOptionsBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000010;
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
public Builder clearBlockChecksumOptions() {
if (blockChecksumOptionsBuilder_ == null) {
blockChecksumOptions_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance();
onChanged();
} else {
blockChecksumOptionsBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder getBlockChecksumOptionsBuilder() {
bitField0_ |= 0x00000010;
onChanged();
return getBlockChecksumOptionsFieldBuilder().getBuilder();
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder() {
if (blockChecksumOptionsBuilder_ != null) {
return blockChecksumOptionsBuilder_.getMessageOrBuilder();
} else {
return blockChecksumOptions_;
}
}
/**
* optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder>
getBlockChecksumOptionsFieldBuilder() {
if (blockChecksumOptionsBuilder_ == null) {
blockChecksumOptionsBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder>(
blockChecksumOptions_,
getParentForChildren(),
isClean());
blockChecksumOptions_ = null;
}
return blockChecksumOptionsBuilder_;
}
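/*
* The SingleFieldBuilder for blockChecksumOptions is created lazily on
* first use. Once it exists, the plain blockChecksumOptions_ field is
* nulled out and every read and write above goes through the builder,
* which keeps change notifications between this builder and the nested
* one in sync (see the blockChecksumOptionsBuilder_ == null branches).
*/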
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpBlockChecksumResponseProto)
}
static {
defaultInstance = new OpBlockChecksumResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.OpBlockChecksumResponseProto)
}
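/*
* Example (sketch): round-tripping an OpBlockChecksumResponseProto using
* only the generated API above. The names digest, out and in are
* illustrative placeholders, not part of this API.
*
*   byte[] digest = ...;                         // e.g. an MD5 over block CRCs
*   OpBlockChecksumResponseProto resp =
*       OpBlockChecksumResponseProto.newBuilder()
*           .setBytesPerCrc(512)                 // required
*           .setCrcPerBlock(256L)                // required
*           .setBlockChecksum(                   // required
*               com.google.protobuf.ByteString.copyFrom(digest))
*           .build();                            // throws if a required field is unset
*   resp.writeDelimitedTo(out);                  // out: java.io.OutputStream
*   OpBlockChecksumResponseProto parsed =
*       OpBlockChecksumResponseProto.parseDelimitedFrom(in);
*/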
public interface OpCustomProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required string customId = 1;
/**
* required string customId = 1;
*/
boolean hasCustomId();
/**
* required string customId = 1;
*/
java.lang.String getCustomId();
/**
* required string customId = 1;
*/
com.google.protobuf.ByteString
getCustomIdBytes();
}
/**
* Protobuf type {@code hadoop.hdfs.OpCustomProto}
*/
public static final class OpCustomProto extends
com.google.protobuf.GeneratedMessage
implements OpCustomProtoOrBuilder {
// Use OpCustomProto.newBuilder() to construct.
private OpCustomProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private OpCustomProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final OpCustomProto defaultInstance;
public static OpCustomProto getDefaultInstance() {
return defaultInstance;
}
public OpCustomProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private OpCustomProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
customId_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
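/*
* Wire-format note: every tag read here is (fieldNumber << 3) | wireType,
* so case 10 is field 1 (customId) with wire type 2 (length-delimited),
* and tag 0 marks end of input. Tags this message does not recognize are
* handed to parseUnknownField and preserved in unknownFields instead of
* being dropped.
*/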
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCustomProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCustomProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.Builder.class);
}
public static com.google.protobuf.Parser<OpCustomProto> PARSER =
new com.google.protobuf.AbstractParser<OpCustomProto>() {
public OpCustomProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new OpCustomProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<OpCustomProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required string customId = 1;
public static final int CUSTOMID_FIELD_NUMBER = 1;
private java.lang.Object customId_;
/**
* required string customId = 1;
*/
public boolean hasCustomId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string customId = 1;
*/
public java.lang.String getCustomId() {
java.lang.Object ref = customId_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
customId_ = s;
}
return s;
}
}
/**
* required string customId = 1;
*/
public com.google.protobuf.ByteString
getCustomIdBytes() {
java.lang.Object ref = customId_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
customId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
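/*
* customId_ holds either a java.lang.String or a ByteString and is
* converted lazily in both directions. getCustomIdBytes() always caches
* the encoded form back into the field; getCustomId() caches the decoded
* String only when the bytes are valid UTF-8, so repeated calls avoid
* re-decoding.
*/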
private void initFields() {
customId_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasCustomId()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getCustomIdBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getCustomIdBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto)) {
return super.equals(obj);
}
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto) obj;
boolean result = true;
result = result && (hasCustomId() == other.hasCustomId());
if (hasCustomId()) {
result = result && getCustomId()
.equals(other.getCustomId());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasCustomId()) {
hash = (37 * hash) + CUSTOMID_FIELD_NUMBER;
hash = (53 * hash) + getCustomId().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
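/*
* hashCode() follows the generated pattern: fold in the descriptor, then
* (37 * h + fieldNumber) and (53 * h + value.hashCode()) for each field
* that is set, then the unknown fields. Because the message is immutable
* the result is memoized, with 0 reserved as the "not yet computed"
* marker.
*/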
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.OpCustomProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCustomProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCustomProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.Builder.class);
}
// Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
customId_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCustomProto_descriptor;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto getDefaultInstanceForType() {
return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.getDefaultInstance();
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto build() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto buildPartial() {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.customId_ = customId_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto) {
return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto other) {
if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.getDefaultInstance()) return this;
if (other.hasCustomId()) {
bitField0_ |= 0x00000001;
customId_ = other.customId_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasCustomId()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string customId = 1;
private java.lang.Object customId_ = "";
/**
* required string customId = 1;
*/
public boolean hasCustomId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string customId = 1;
*/
public java.lang.String getCustomId() {
java.lang.Object ref = customId_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
customId_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string customId = 1;
*/
public com.google.protobuf.ByteString
getCustomIdBytes() {
java.lang.Object ref = customId_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
customId_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string customId = 1;
*/
public Builder setCustomId(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
customId_ = value;
onChanged();
return this;
}
/**
* required string customId = 1;
*/
public Builder clearCustomId() {
bitField0_ = (bitField0_ & ~0x00000001);
customId_ = getDefaultInstance().getCustomId();
onChanged();
return this;
}
/**
* required string customId = 1;
*/
public Builder setCustomIdBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
customId_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpCustomProto)
}
static {
defaultInstance = new OpCustomProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.OpCustomProto)
}
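/*
* Example (sketch): parsing an OpCustomProto from raw bytes and rebuilding
* it. The variable data is an illustrative byte[] holding one serialized
* message; "my-op" is an arbitrary sample id.
*
*   OpCustomProto op = OpCustomProto.parseFrom(data);
*   if (op.hasCustomId()) {
*     String id = op.getCustomId();              // lazily decoded from UTF-8
*   }
*   byte[] again = op.toBuilder()
*       .setCustomId("my-op")                    // required string customId = 1
*       .build()
*       .toByteArray();
*/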
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_BaseHeaderProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_BaseHeaderProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_DataTransferTraceInfoProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_DataTransferTraceInfoProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ClientOperationHeaderProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_ClientOperationHeaderProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_CachingStrategyProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_CachingStrategyProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpReadBlockProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_OpReadBlockProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ChecksumProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_ChecksumProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpWriteBlockProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_OpWriteBlockProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpTransferBlockProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_OpTransferBlockProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpReplaceBlockProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_OpReplaceBlockProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpCopyBlockProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_OpCopyBlockProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpBlockChecksumProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_OpBlockChecksumProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ShortCircuitShmIdProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_ShortCircuitShmIdProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_PacketHeaderProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_PacketHeaderProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_PipelineAckProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_PipelineAckProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_BlockOpResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_BlockOpResponseProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ClientReadStatusProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_ClientReadStatusProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_DNTransferAckProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_DNTransferAckProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_OpCustomProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_hdfs_OpCustomProto_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
return descriptor;
}
private static com.google.protobuf.Descriptors.FileDescriptor
descriptor;
static {
java.lang.String[] descriptorData = {
"\n\022datatransfer.proto\022\013hadoop.hdfs\032\016Secur" +
"ity.proto\032\nhdfs.proto\"\245\002\n!DataTransferEn" +
"cryptorMessageProto\022Z\n\006status\030\001 \002(\0162J.ha" +
"doop.hdfs.DataTransferEncryptorMessagePr" +
"oto.DataTransferEncryptorStatus\022\017\n\007paylo" +
"ad\030\002 \001(\014\022\017\n\007message\030\003 \001(\t\0224\n\014cipherOptio" +
"n\030\004 \003(\0132\036.hadoop.hdfs.CipherOptionProto\"" +
"L\n\033DataTransferEncryptorStatus\022\013\n\007SUCCES" +
"S\020\000\022\025\n\021ERROR_UNKNOWN_KEY\020\001\022\t\n\005ERROR\020\002\"\247\001" +
"\n\017BaseHeaderProto\022.\n\005block\030\001 \002(\0132\037.hadoo",
"p.hdfs.ExtendedBlockProto\022(\n\005token\030\002 \001(\013" +
"2\031.hadoop.common.TokenProto\022:\n\ttraceInfo" +
"\030\003 \001(\0132\'.hadoop.hdfs.DataTransferTraceIn" +
"foProto\"?\n\032DataTransferTraceInfoProto\022\017\n" +
"\007traceId\030\001 \002(\004\022\020\n\010parentId\030\002 \002(\004\"b\n\032Clie" +
"ntOperationHeaderProto\0220\n\nbaseHeader\030\001 \002" +
"(\0132\034.hadoop.hdfs.BaseHeaderProto\022\022\n\nclie" +
"ntName\030\002 \002(\t\"=\n\024CachingStrategyProto\022\022\n\n" +
"dropBehind\030\001 \001(\010\022\021\n\treadahead\030\002 \001(\003\"\301\001\n\020" +
"OpReadBlockProto\0227\n\006header\030\001 \002(\0132\'.hadoo",
"p.hdfs.ClientOperationHeaderProto\022\016\n\006off" +
"set\030\002 \002(\004\022\013\n\003len\030\003 \002(\004\022\033\n\rsendChecksums\030" +
"\004 \001(\010:\004true\022:\n\017cachingStrategy\030\005 \001(\0132!.h" +
"adoop.hdfs.CachingStrategyProto\"W\n\rCheck" +
"sumProto\022,\n\004type\030\001 \002(\0162\036.hadoop.hdfs.Che" +
"cksumTypeProto\022\030\n\020bytesPerChecksum\030\002 \002(\r" +
"\"\305\007\n\021OpWriteBlockProto\0227\n\006header\030\001 \002(\0132\'" +
".hadoop.hdfs.ClientOperationHeaderProto\022" +
"/\n\007targets\030\002 \003(\0132\036.hadoop.hdfs.DatanodeI" +
"nfoProto\022.\n\006source\030\003 \001(\0132\036.hadoop.hdfs.D",
"atanodeInfoProto\022D\n\005stage\030\004 \002(\01625.hadoop" +
".hdfs.OpWriteBlockProto.BlockConstructio" +
"nStage\022\024\n\014pipelineSize\030\005 \002(\r\022\024\n\014minBytes" +
"Rcvd\030\006 \002(\004\022\024\n\014maxBytesRcvd\030\007 \002(\004\022\035\n\025late" +
"stGenerationStamp\030\010 \002(\004\0225\n\021requestedChec" +
"ksum\030\t \002(\0132\032.hadoop.hdfs.ChecksumProto\022:" +
"\n\017cachingStrategy\030\n \001(\0132!.hadoop.hdfs.Ca" +
"chingStrategyProto\0228\n\013storageType\030\013 \001(\0162" +
"\035.hadoop.hdfs.StorageTypeProto:\004DISK\0229\n\022" +
"targetStorageTypes\030\014 \003(\0162\035.hadoop.hdfs.S",
"torageTypeProto\022\037\n\020allowLazyPersist\030\r \001(" +
"\010:\005false\022\026\n\007pinning\030\016 \001(\010:\005false\022\026\n\016targ" +
"etPinnings\030\017 \003(\010\022\021\n\tstorageId\030\020 \001(\t\022\030\n\020t" +
"argetStorageIds\030\021 \003(\t\"\210\002\n\026BlockConstruct" +
"ionStage\022\031\n\025PIPELINE_SETUP_APPEND\020\000\022\"\n\036P" +
"IPELINE_SETUP_APPEND_RECOVERY\020\001\022\022\n\016DATA_" +
"STREAMING\020\002\022%\n!PIPELINE_SETUP_STREAMING_" +
"RECOVERY\020\003\022\022\n\016PIPELINE_CLOSE\020\004\022\033\n\027PIPELI" +
"NE_CLOSE_RECOVERY\020\005\022\031\n\025PIPELINE_SETUP_CR" +
"EATE\020\006\022\020\n\014TRANSFER_RBW\020\007\022\026\n\022TRANSFER_FIN",
"ALIZED\020\010\"\325\001\n\024OpTransferBlockProto\0227\n\006hea" +
"der\030\001 \002(\0132\'.hadoop.hdfs.ClientOperationH" +
"eaderProto\022/\n\007targets\030\002 \003(\0132\036.hadoop.hdf" +
"s.DatanodeInfoProto\0229\n\022targetStorageType" +
"s\030\003 \003(\0162\035.hadoop.hdfs.StorageTypeProto\022\030" +
"\n\020targetStorageIds\030\004 \003(\t\"\321\001\n\023OpReplaceBl" +
"ockProto\022,\n\006header\030\001 \002(\0132\034.hadoop.hdfs.B" +
"aseHeaderProto\022\017\n\007delHint\030\002 \002(\t\022.\n\006sourc" +
"e\030\003 \002(\0132\036.hadoop.hdfs.DatanodeInfoProto\022" +
"8\n\013storageType\030\004 \001(\0162\035.hadoop.hdfs.Stora",
"geTypeProto:\004DISK\022\021\n\tstorageId\030\005 \001(\t\"@\n\020" +
"OpCopyBlockProto\022,\n\006header\030\001 \002(\0132\034.hadoo" +
"p.hdfs.BaseHeaderProto\"\212\001\n\024OpBlockChecks" +
"umProto\022,\n\006header\030\001 \002(\0132\034.hadoop.hdfs.Ba" +
"seHeaderProto\022D\n\024blockChecksumOptions\030\002 " +
"\001(\0132&.hadoop.hdfs.BlockChecksumOptionsPr" +
"oto\"\335\002\n\031OpBlockGroupChecksumProto\022,\n\006hea" +
"der\030\001 \002(\0132\034.hadoop.hdfs.BaseHeaderProto\022" +
"2\n\tdatanodes\030\002 \002(\0132\037.hadoop.hdfs.Datanod" +
"eInfosProto\022.\n\013blockTokens\030\003 \003(\0132\031.hadoo",
"p.common.TokenProto\0227\n\010ecPolicy\030\004 \002(\0132%." +
"hadoop.hdfs.ErasureCodingPolicyProto\022\024\n\014" +
"blockIndices\030\005 \003(\r\022\031\n\021requestedNumBytes\030" +
"\006 \002(\004\022D\n\024blockChecksumOptions\030\007 \001(\0132&.ha" +
"doop.hdfs.BlockChecksumOptionsProto\"0\n\026S" +
"hortCircuitShmIdProto\022\n\n\002hi\030\001 \002(\003\022\n\n\002lo\030" +
"\002 \002(\003\"_\n\030ShortCircuitShmSlotProto\0222\n\005shm" +
"Id\030\001 \002(\0132#.hadoop.hdfs.ShortCircuitShmId" +
"Proto\022\017\n\007slotIdx\030\002 \002(\005\"\307\001\n OpRequestShor" +
"tCircuitAccessProto\022,\n\006header\030\001 \002(\0132\034.ha",
"doop.hdfs.BaseHeaderProto\022\022\n\nmaxVersion\030" +
"\002 \002(\r\0225\n\006slotId\030\003 \001(\0132%.hadoop.hdfs.Shor" +
"tCircuitShmSlotProto\022*\n\033supportsReceiptV" +
"erification\030\004 \001(\010:\005false\"\232\001\n%ReleaseShor" +
"tCircuitAccessRequestProto\0225\n\006slotId\030\001 \002" +
"(\0132%.hadoop.hdfs.ShortCircuitShmSlotProt" +
"o\022:\n\ttraceInfo\030\002 \001(\0132\'.hadoop.hdfs.DataT" +
"ransferTraceInfoProto\"\\\n&ReleaseShortCir" +
"cuitAccessResponseProto\022#\n\006status\030\001 \002(\0162" +
"\023.hadoop.hdfs.Status\022\r\n\005error\030\002 \001(\t\"m\n\033S",
"hortCircuitShmRequestProto\022\022\n\nclientName" +
"\030\001 \002(\t\022:\n\ttraceInfo\030\002 \001(\0132\'.hadoop.hdfs." +
"DataTransferTraceInfoProto\"\203\001\n\034ShortCirc" +
"uitShmResponseProto\022#\n\006status\030\001 \002(\0162\023.ha" +
"doop.hdfs.Status\022\r\n\005error\030\002 \001(\t\022/\n\002id\030\003 " +
"\001(\0132#.hadoop.hdfs.ShortCircuitShmIdProto" +
"\"\177\n\021PacketHeaderProto\022\025\n\roffsetInBlock\030\001" +
" \002(\020\022\r\n\005seqno\030\002 \002(\020\022\031\n\021lastPacketInBlock" +
"\030\003 \002(\010\022\017\n\007dataLen\030\004 \002(\017\022\030\n\tsyncBlock\030\005 \001" +
"(\010:\005false\"z\n\020PipelineAckProto\022\r\n\005seqno\030\001",
" \002(\022\022\"\n\005reply\030\002 \003(\0162\023.hadoop.hdfs.Status" +
"\022!\n\026downstreamAckTimeNanos\030\003 \001(\004:\0010\022\020\n\004f" +
"lag\030\004 \003(\rB\002\020\001\"\\\n\027ReadOpChecksumInfoProto" +
"\022,\n\010checksum\030\001 \002(\0132\032.hadoop.hdfs.Checksu" +
"mProto\022\023\n\013chunkOffset\030\002 \002(\004\"\214\002\n\024BlockOpR" +
"esponseProto\022#\n\006status\030\001 \002(\0162\023.hadoop.hd" +
"fs.Status\022\024\n\014firstBadLink\030\002 \001(\t\022C\n\020check" +
"sumResponse\030\003 \001(\0132).hadoop.hdfs.OpBlockC" +
"hecksumResponseProto\022@\n\022readOpChecksumIn" +
"fo\030\004 \001(\0132$.hadoop.hdfs.ReadOpChecksumInf",
"oProto\022\017\n\007message\030\005 \001(\t\022!\n\031shortCircuitA" +
"ccessVersion\030\006 \001(\r\"<\n\025ClientReadStatusPr" +
"oto\022#\n\006status\030\001 \002(\0162\023.hadoop.hdfs.Status" +
"\"9\n\022DNTransferAckProto\022#\n\006status\030\001 \002(\0162\023" +
".hadoop.hdfs.Status\"\326\001\n\034OpBlockChecksumR" +
"esponseProto\022\023\n\013bytesPerCrc\030\001 \002(\r\022\023\n\013crc" +
"PerBlock\030\002 \002(\004\022\025\n\rblockChecksum\030\003 \002(\014\022/\n" +
"\007crcType\030\004 \001(\0162\036.hadoop.hdfs.ChecksumTyp" +
"eProto\022D\n\024blockChecksumOptions\030\005 \001(\0132&.h" +
"adoop.hdfs.BlockChecksumOptionsProto\"!\n\r",
"OpCustomProto\022\020\n\010customId\030\001 \002(\t*\214\002\n\006Stat" +
"us\022\013\n\007SUCCESS\020\000\022\t\n\005ERROR\020\001\022\022\n\016ERROR_CHEC" +
"KSUM\020\002\022\021\n\rERROR_INVALID\020\003\022\020\n\014ERROR_EXIST" +
"S\020\004\022\026\n\022ERROR_ACCESS_TOKEN\020\005\022\017\n\013CHECKSUM_" +
"OK\020\006\022\025\n\021ERROR_UNSUPPORTED\020\007\022\017\n\013OOB_RESTA" +
"RT\020\010\022\021\n\rOOB_RESERVED1\020\t\022\021\n\rOOB_RESERVED2" +
"\020\n\022\021\n\rOOB_RESERVED3\020\013\022\017\n\013IN_PROGRESS\020\014\022\026" +
"\n\022ERROR_BLOCK_PINNED\020\r*[\n\026ShortCircuitFd" +
"Response\022#\n\037DO_NOT_USE_RECEIPT_VERIFICAT" +
"ION\020\000\022\034\n\030USE_RECEIPT_VERIFICATION\020\001B>\n%o",
"rg.apache.hadoop.hdfs.protocol.protoB\022Da" +
"taTransferProtos\240\001\001"
};
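/*
* descriptorData is the serialized FileDescriptorProto for
* datatransfer.proto, including its imports Security.proto and hdfs.proto,
* embedded as escaped binary string chunks. The protocol buffer compiler
* splits it into concatenated pieces to stay within Java's limit on the
* size of a single string constant.
*/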
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
public com.google.protobuf.ExtensionRegistry assignDescriptors(
com.google.protobuf.Descriptors.FileDescriptor root) {
descriptor = root;
internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_descriptor =
getDescriptor().getMessageTypes().get(0);
internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_descriptor,
new java.lang.String[] { "Status", "Payload", "Message", "CipherOption", });
internal_static_hadoop_hdfs_BaseHeaderProto_descriptor =
getDescriptor().getMessageTypes().get(1);
internal_static_hadoop_hdfs_BaseHeaderProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_BaseHeaderProto_descriptor,
new java.lang.String[] { "Block", "Token", "TraceInfo", });
internal_static_hadoop_hdfs_DataTransferTraceInfoProto_descriptor =
getDescriptor().getMessageTypes().get(2);
internal_static_hadoop_hdfs_DataTransferTraceInfoProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_DataTransferTraceInfoProto_descriptor,
new java.lang.String[] { "TraceId", "ParentId", });
internal_static_hadoop_hdfs_ClientOperationHeaderProto_descriptor =
getDescriptor().getMessageTypes().get(3);
internal_static_hadoop_hdfs_ClientOperationHeaderProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_ClientOperationHeaderProto_descriptor,
new java.lang.String[] { "BaseHeader", "ClientName", });
internal_static_hadoop_hdfs_CachingStrategyProto_descriptor =
getDescriptor().getMessageTypes().get(4);
internal_static_hadoop_hdfs_CachingStrategyProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_CachingStrategyProto_descriptor,
new java.lang.String[] { "DropBehind", "Readahead", });
internal_static_hadoop_hdfs_OpReadBlockProto_descriptor =
getDescriptor().getMessageTypes().get(5);
internal_static_hadoop_hdfs_OpReadBlockProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_OpReadBlockProto_descriptor,
new java.lang.String[] { "Header", "Offset", "Len", "SendChecksums", "CachingStrategy", });
internal_static_hadoop_hdfs_ChecksumProto_descriptor =
getDescriptor().getMessageTypes().get(6);
internal_static_hadoop_hdfs_ChecksumProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_ChecksumProto_descriptor,
new java.lang.String[] { "Type", "BytesPerChecksum", });
internal_static_hadoop_hdfs_OpWriteBlockProto_descriptor =
getDescriptor().getMessageTypes().get(7);
internal_static_hadoop_hdfs_OpWriteBlockProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_OpWriteBlockProto_descriptor,
new java.lang.String[] { "Header", "Targets", "Source", "Stage", "PipelineSize", "MinBytesRcvd", "MaxBytesRcvd", "LatestGenerationStamp", "RequestedChecksum", "CachingStrategy", "StorageType", "TargetStorageTypes", "AllowLazyPersist", "Pinning", "TargetPinnings", "StorageId", "TargetStorageIds", });
internal_static_hadoop_hdfs_OpTransferBlockProto_descriptor =
getDescriptor().getMessageTypes().get(8);
internal_static_hadoop_hdfs_OpTransferBlockProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_OpTransferBlockProto_descriptor,
new java.lang.String[] { "Header", "Targets", "TargetStorageTypes", "TargetStorageIds", });
internal_static_hadoop_hdfs_OpReplaceBlockProto_descriptor =
getDescriptor().getMessageTypes().get(9);
internal_static_hadoop_hdfs_OpReplaceBlockProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_OpReplaceBlockProto_descriptor,
new java.lang.String[] { "Header", "DelHint", "Source", "StorageType", "StorageId", });
internal_static_hadoop_hdfs_OpCopyBlockProto_descriptor =
getDescriptor().getMessageTypes().get(10);
internal_static_hadoop_hdfs_OpCopyBlockProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_OpCopyBlockProto_descriptor,
new java.lang.String[] { "Header", });
internal_static_hadoop_hdfs_OpBlockChecksumProto_descriptor =
getDescriptor().getMessageTypes().get(11);
internal_static_hadoop_hdfs_OpBlockChecksumProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_OpBlockChecksumProto_descriptor,
new java.lang.String[] { "Header", "BlockChecksumOptions", });
internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_descriptor =
getDescriptor().getMessageTypes().get(12);
internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_descriptor,
new java.lang.String[] { "Header", "Datanodes", "BlockTokens", "EcPolicy", "BlockIndices", "RequestedNumBytes", "BlockChecksumOptions", });
internal_static_hadoop_hdfs_ShortCircuitShmIdProto_descriptor =
getDescriptor().getMessageTypes().get(13);
internal_static_hadoop_hdfs_ShortCircuitShmIdProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_ShortCircuitShmIdProto_descriptor,
new java.lang.String[] { "Hi", "Lo", });
internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_descriptor =
getDescriptor().getMessageTypes().get(14);
internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_descriptor,
new java.lang.String[] { "ShmId", "SlotIdx", });
internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_descriptor =
getDescriptor().getMessageTypes().get(15);
internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_descriptor,
new java.lang.String[] { "Header", "MaxVersion", "SlotId", "SupportsReceiptVerification", });
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_descriptor =
getDescriptor().getMessageTypes().get(16);
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_descriptor,
new java.lang.String[] { "SlotId", "TraceInfo", });
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_descriptor =
getDescriptor().getMessageTypes().get(17);
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_descriptor,
new java.lang.String[] { "Status", "Error", });
internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_descriptor =
getDescriptor().getMessageTypes().get(18);
internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_descriptor,
new java.lang.String[] { "ClientName", "TraceInfo", });
internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_descriptor =
getDescriptor().getMessageTypes().get(19);
internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_descriptor,
new java.lang.String[] { "Status", "Error", "Id", });
internal_static_hadoop_hdfs_PacketHeaderProto_descriptor =
getDescriptor().getMessageTypes().get(20);
internal_static_hadoop_hdfs_PacketHeaderProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_PacketHeaderProto_descriptor,
new java.lang.String[] { "OffsetInBlock", "Seqno", "LastPacketInBlock", "DataLen", "SyncBlock", });
internal_static_hadoop_hdfs_PipelineAckProto_descriptor =
getDescriptor().getMessageTypes().get(21);
internal_static_hadoop_hdfs_PipelineAckProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_PipelineAckProto_descriptor,
new java.lang.String[] { "Seqno", "Reply", "DownstreamAckTimeNanos", "Flag", });
internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_descriptor =
getDescriptor().getMessageTypes().get(22);
internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_descriptor,
new java.lang.String[] { "Checksum", "ChunkOffset", });
internal_static_hadoop_hdfs_BlockOpResponseProto_descriptor =
getDescriptor().getMessageTypes().get(23);
internal_static_hadoop_hdfs_BlockOpResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_BlockOpResponseProto_descriptor,
new java.lang.String[] { "Status", "FirstBadLink", "ChecksumResponse", "ReadOpChecksumInfo", "Message", "ShortCircuitAccessVersion", });
internal_static_hadoop_hdfs_ClientReadStatusProto_descriptor =
getDescriptor().getMessageTypes().get(24);
internal_static_hadoop_hdfs_ClientReadStatusProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_ClientReadStatusProto_descriptor,
new java.lang.String[] { "Status", });
internal_static_hadoop_hdfs_DNTransferAckProto_descriptor =
getDescriptor().getMessageTypes().get(25);
internal_static_hadoop_hdfs_DNTransferAckProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_DNTransferAckProto_descriptor,
new java.lang.String[] { "Status", });
internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_descriptor =
getDescriptor().getMessageTypes().get(26);
internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_descriptor,
new java.lang.String[] { "BytesPerCrc", "CrcPerBlock", "BlockChecksum", "CrcType", "BlockChecksumOptions", });
internal_static_hadoop_hdfs_OpCustomProto_descriptor =
getDescriptor().getMessageTypes().get(27);
internal_static_hadoop_hdfs_OpCustomProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_hdfs_OpCustomProto_descriptor,
new java.lang.String[] { "CustomId", });
return null;
}
};
com.google.protobuf.Descriptors.FileDescriptor
.internalBuildGeneratedFileFrom(descriptorData,
new com.google.protobuf.Descriptors.FileDescriptor[] {
org.apache.hadoop.security.proto.SecurityProtos.getDescriptor(),
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
}, assigner);
}
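/*
* The static initializer above builds the FileDescriptor from
* descriptorData, passing the descriptors of the imported files
* (SecurityProtos, HdfsProtos) so cross-file type references resolve.
* The assigner then caches each message descriptor together with its
* FieldAccessorTable, which is what the reflection-based accessors in the
* generated messages rely on.
*/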
// @@protoc_insertion_point(outer_class_scope)
}